Background sender, first draft
Some checks failed
continuous-integration/drone/push Build is failing

This commit is contained in:
ycc
2026-02-26 18:50:46 +01:00
parent cfa20861c5
commit eb7fdc9b03
5 changed files with 962 additions and 0 deletions

View File

@@ -0,0 +1,155 @@
package helpers
import (
"errors"
"os"
"path/filepath"
"sync"
"time"
"forge.redroom.link/yves/meowlib"
"forge.redroom.link/yves/meowlib/client"
)
const maxRetriesPerServer = 3
const defaultSendTimeout = 3600 * 24 // seconds, used when job.Timeout is 0
// WriteSendJob enqueues a SendJob on behalf of the main Flutter isolate.
// It simply delegates to client.PushSendJob, which makes it safe to call
// concurrently with ProcessSendQueues running in the send isolate.
func WriteSendJob(storagePath string, job *client.SendJob) error {
	err := client.PushSendJob(storagePath, job)
	return err
}
// ProcessSendQueues scans storagePath/queues/ for queue database files and
// runs one worker goroutine per queue, blocking until every worker has
// finished. Intended to be invoked from the send isolate on a wake-up
// notification or on a periodic timer.
func ProcessSendQueues(storagePath string) {
	queueDir := filepath.Join(storagePath, "queues")
	entries, err := os.ReadDir(queueDir)
	if err != nil {
		logger.Warn().Err(err).Str("dir", queueDir).Msg("ProcessSendQueues: ReadDir")
		return
	}
	var wg sync.WaitGroup
	for _, entry := range entries {
		if entry.IsDir() {
			continue // sub-directories are never queue files
		}
		// Fresh variable per iteration: safe to capture even pre-Go 1.22.
		name := entry.Name()
		wg.Add(1)
		go func() {
			defer wg.Done()
			processSendQueue(storagePath, name)
		}()
	}
	wg.Wait()
}
// processSendQueue processes pending jobs for a single named queue sequentially.
//
// For each pending job it will:
//   - immediately mark it failed if its timeout has elapsed
//   - attempt delivery, cycling through servers until one succeeds
//   - mark it sent on success or failed when all servers are exhausted
//   - stop and return when a job still has retries left (will resume on next call)
//
// Whenever persisting a job update fails this function returns instead of
// continuing: PeekSendJob would hand back the same still-pending job, so
// continuing on stale on-disk state could spin forever on one job or even
// re-send a message that was already delivered.
func processSendQueue(storagePath, queue string) {
	for {
		job, _, err := client.PeekSendJob(storagePath, queue)
		if err != nil {
			logger.Error().Err(err).Str("queue", queue).Msg("processSendQueue: PeekSendJob")
			return
		}
		if job == nil {
			return // no more pending jobs
		}
		// Hard timeout: the job has been sitting in the queue too long.
		if job.Timeout > 0 && time.Since(job.InsertedAt) > time.Duration(job.Timeout)*time.Second {
			job.Status = client.SendStatusFailed
			if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
				logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob timeout")
				return // persisting failed; continuing would peek the same job forever
			}
			continue // try the next pending job
		}
		serverIdx, sendErr := attemptSendJob(job)
		if sendErr == nil {
			now := time.Now()
			job.Status = client.SendStatusSent
			job.SentAt = &now
			job.SuccessfulServer = &serverIdx
			if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
				logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob sent")
				return // do NOT continue: re-peeking would re-send an already delivered job
			}
			continue // job delivered, look for the next one
		}
		// Delivery failed on every usable server this round. If every server
		// has now reached its retry limit the job is terminally failed;
		// either way persist the updated retry counts in a single write.
		if allServersExhausted(job) {
			job.Status = client.SendStatusFailed
		}
		if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
			logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob retries")
			return // cannot persist progress; bail out rather than loop on stale state
		}
		if job.Status == client.SendStatusFailed {
			continue // all servers dead for this job, try the next one
		}
		// Job still has remaining retries on some server; stop and wait for the next poll.
		return
	}
}
// attemptSendJob loads the pre-built packed message from job.File and tries
// each server in job.Servers order, skipping any server that has already
// accumulated maxRetriesPerServer failures. It returns the index of the first
// server that accepts the POST; otherwise every tried server's counter in
// job.Retries is incremented in place and an error is returned.
func attemptSendJob(job *client.SendJob) (int, error) {
	payload, err := os.ReadFile(job.File)
	if err != nil {
		return -1, err
	}
	// Grow the retries slice until it covers every server.
	for len(job.Retries) < len(job.Servers) {
		job.Retries = append(job.Retries, 0)
	}
	// NOTE(review): job.Timeout doubles as the HTTP timeout here and as the
	// queue-lifetime limit in processSendQueue — confirm HttpPostMessage
	// expects seconds and that a 24h default POST timeout is intended.
	postTimeout := job.Timeout
	if postTimeout <= 0 {
		postTimeout = defaultSendTimeout
	}
	for idx := range job.Servers {
		if job.Retries[idx] >= maxRetriesPerServer {
			continue // this server is exhausted
		}
		srv := job.Servers[idx]
		if _, err := meowlib.HttpPostMessage(srv.Url, payload, postTimeout); err != nil {
			logger.Warn().Err(err).Str("url", srv.Url).Int("retry", job.Retries[idx]+1).Msg("attemptSendJob: POST failed")
			job.Retries[idx]++
			continue
		}
		return idx, nil
	}
	return -1, errors.New("all servers failed or exhausted")
}
// allServersExhausted reports whether every server attached to the job has
// already failed maxRetriesPerServer times without a successful delivery.
// A job with an empty server list is considered exhausted.
func allServersExhausted(job *client.SendJob) bool {
	for i := range job.Servers {
		// A server counts as exhausted only when a retry counter exists for
		// it AND that counter has reached the per-server limit.
		exhausted := i < len(job.Retries) && job.Retries[i] >= maxRetriesPerServer
		if !exhausted {
			return false
		}
	}
	return true
}