Compare commits

..

21 Commits

Author SHA1 Message Date
ycc
d23ab73cf9 message ack receice and reactions protobuf
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-06 11:59:47 +01:00
ycc
f6531e344e MarkMessageProcessed added
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-05 21:33:03 +01:00
ycc
32cc9ff848 double ratchet key persitence bugfix
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-05 09:06:16 +01:00
ycc
f4fb42d72e double ratchet first implementation
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-04 22:30:22 +01:00
ycc
c0dcfe997c Adding inner symetric encryption
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-04 21:40:26 +01:00
ycc
5748ead926 received timestamp added to bd
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-04 11:58:14 +01:00
ycc
14a07dcb5c delivery ack added
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-04 10:44:17 +01:00
ycc
fab5818ec7 adding peer/contactcard attributes
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-03 10:46:25 +01:00
ycc
8836d5c591 fix add srvr key fetch from bg send
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-01 22:50:54 +01:00
ycc
0fdf5dd9c7 simplify send message bg helper
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-01 21:15:17 +01:00
ycc
7d06f0ff3e timeout fix
Some checks failed
continuous-integration/drone/push Build is failing
2026-03-01 14:23:07 +01:00
ycc
b722a916a9 change impl of last fix
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-28 21:22:15 +01:00
ycc
cd9ee54f6d jobs ack fix
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-28 21:04:13 +01:00
ycc
67823237e6 remove useless password
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-28 19:01:49 +01:00
ycc
aa91d2cc0f adding message sent date
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-28 18:35:27 +01:00
ycc
66a6674a6a ProcessSentMessages added for server acks
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-28 10:08:55 +01:00
ycc
a322f3fccf add fields for server delivery tracking
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-27 21:45:44 +01:00
ycc
e6f9bc796e count sends
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-27 20:58:15 +01:00
ycc
f76213d55a nil pointer fix
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-27 20:13:58 +01:00
ycc
aeeebf6f58 add missing server encryption
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-26 22:08:45 +01:00
ycc
423c5d6d64 refactor to separate uder packing from server packing
Some checks failed
continuous-integration/drone/push Build is failing
2026-02-26 21:07:38 +01:00
24 changed files with 2289 additions and 450 deletions

View File

@@ -20,6 +20,8 @@ func DbMessageToInternalUserMessage(id int64, dbFile string, dbm *meowlib.DbMess
ium.Messagetype = dbm.Type ium.Messagetype = dbm.Type
ium.Appdata = dbm.Appdata ium.Appdata = dbm.Appdata
ium.FilePaths = dbm.FilePaths ium.FilePaths = dbm.FilePaths
ium.ServerDeliveryUuid = dbm.ServerDeliveryUuid
ium.ServerDeliveryTimestamp = dbm.ServerDeliveryTimestamp
return &ium return &ium
} }
@@ -33,6 +35,8 @@ func InternalUserMessageToDbMessage(ium *InternalUserMessage) *meowlib.DbMessage
dbm.CurrentLocation = ium.CurrentLocation dbm.CurrentLocation = ium.CurrentLocation
dbm.Status = ium.Status dbm.Status = ium.Status
dbm.FilePaths = ium.FilePaths dbm.FilePaths = ium.FilePaths
dbm.ServerDeliveryUuid = ium.ServerDeliveryUuid
dbm.ServerDeliveryTimestamp = ium.ServerDeliveryTimestamp
return &dbm return &dbm
} }

174
client/drsession.go Normal file
View File

@@ -0,0 +1,174 @@
package client
import (
"encoding/hex"
"encoding/json"
"encoding/base64"
"fmt"
doubleratchet "github.com/status-im/doubleratchet"
)
// drLocalPair implements doubleratchet.DHPair using raw byte slices.
// It is a plain value type: the key slices are stored as-is (not copied),
// so callers must not mutate them after constructing the pair.
type drLocalPair struct {
	priv doubleratchet.Key // DH private key, raw bytes
	pub  doubleratchet.Key // DH public key, raw bytes
}

// PrivateKey returns the stored DH private key (doubleratchet.DHPair).
func (p drLocalPair) PrivateKey() doubleratchet.Key { return p.priv }

// PublicKey returns the stored DH public key (doubleratchet.DHPair).
func (p drLocalPair) PublicKey() doubleratchet.Key { return p.pub }
// serializedDRState is an intermediate JSON-friendly representation of
// doubleratchet.State, used to persist a session into Peer.DrStateJson.
// Field names and JSON tags define the on-disk format: do not rename them
// without a migration for previously saved states.
type serializedDRState struct {
	DHrPublic  []byte `json:"dhr_pub"`  // remote party's current ratchet public key
	DHsPrivate []byte `json:"dhs_priv"` // our current ratchet private key
	DHsPublic  []byte `json:"dhs_pub"`  // our current ratchet public key
	RootChCK   []byte `json:"root_ch_ck"` // root chain key
	SendChCK   []byte `json:"send_ch_ck"` // sending chain key
	SendChN    uint32 `json:"send_ch_n"`  // number of messages in the sending chain
	RecvChCK   []byte `json:"recv_ch_ck"` // receiving chain key
	RecvChN    uint32 `json:"recv_ch_n"`  // number of messages in the receiving chain
	PN         uint32 `json:"pn"`         // messages in the previous sending chain
	// MkSkipped maps (hex-encoded ratchet public key) -> (message number ->
	// message key) for out-of-order messages whose keys were skipped.
	MkSkipped map[string]map[uint][]byte `json:"mk_skipped"`
	MaxSkip   uint                       `json:"max_skip"`
	MaxKeep   uint                       `json:"max_keep"`
	MaxMessageKeysPerSession int         `json:"max_mks_per_session"`
	// Step and KeysCount mirror the library's internal ratchet-step
	// bookkeeping; their exact semantics come from the doubleratchet
	// package — round-tripped verbatim here.
	Step      uint `json:"step"`
	KeysCount uint `json:"keys_count"`
}
// drSessionStorage implements doubleratchet.SessionStorage, persisting the
// ratchet state as JSON into peer.DrStateJson. Each instance is bound to a
// single peer, so the session id passed by the library is not used to key
// the storage.
type drSessionStorage struct{ peer *Peer }
// Save implements doubleratchet.SessionStorage. It snapshots the full
// ratchet state — including every skipped message key — into a JSON
// document held on the bound peer (peer.DrStateJson). The id argument is
// ignored because this storage instance serves exactly one peer.
func (s *drSessionStorage) Save(id []byte, state *doubleratchet.State) error {
	skipped, err := state.MkSkipped.All()
	if err != nil {
		return fmt.Errorf("drSessionStorage.Save: MkSkipped.All: %w", err)
	}

	// Flatten the library's skipped-key store into plain byte maps so it
	// can be marshalled to JSON.
	flat := make(map[string]map[uint][]byte, len(skipped))
	for pubKey, byNum := range skipped {
		entry := make(map[uint][]byte, len(byNum))
		for num, mk := range byNum {
			entry[num] = []byte(mk)
		}
		flat[pubKey] = entry
	}

	snapshot := serializedDRState{
		DHrPublic:                []byte(state.DHr),
		DHsPrivate:               []byte(state.DHs.PrivateKey()),
		DHsPublic:                []byte(state.DHs.PublicKey()),
		RootChCK:                 []byte(state.RootCh.CK),
		SendChCK:                 []byte(state.SendCh.CK),
		SendChN:                  state.SendCh.N,
		RecvChCK:                 []byte(state.RecvCh.CK),
		RecvChN:                  state.RecvCh.N,
		PN:                       state.PN,
		MkSkipped:                flat,
		MaxSkip:                  state.MaxSkip,
		MaxKeep:                  state.MaxKeep,
		MaxMessageKeysPerSession: state.MaxMessageKeysPerSession,
		Step:                     state.Step,
		KeysCount:                state.KeysCount,
	}

	encoded, err := json.Marshal(snapshot)
	if err != nil {
		return fmt.Errorf("drSessionStorage.Save: json.Marshal: %w", err)
	}
	s.peer.DrStateJson = string(encoded)
	return nil
}
// Load implements doubleratchet.SessionStorage. It rebuilds a
// doubleratchet.State from the JSON snapshot stored on the bound peer,
// returning (nil, nil) when no state has been persisted yet. The id
// argument is only forwarded to the skipped-key store.
func (s *drSessionStorage) Load(id []byte) (*doubleratchet.State, error) {
	raw := s.peer.DrStateJson
	if raw == "" {
		return nil, nil // nothing persisted yet
	}

	var snapshot serializedDRState
	if err := json.Unmarshal([]byte(raw), &snapshot); err != nil {
		return nil, fmt.Errorf("drSessionStorage.Load: json.Unmarshal: %w", err)
	}

	crypto := doubleratchet.DefaultCrypto{}

	// Re-populate the in-memory skipped-key store. seq is a fresh running
	// counter across all restored keys; map iteration order is random, so
	// the original insertion order is not preserved (same as before).
	skippedStore := &doubleratchet.KeysStorageInMemory{}
	var seq uint
	for hexPub, byNum := range snapshot.MkSkipped {
		pub, err := hex.DecodeString(hexPub)
		if err != nil {
			return nil, fmt.Errorf("drSessionStorage.Load: decode skipped key hex: %w", err)
		}
		for num, mk := range byNum {
			if err := skippedStore.Put(id, doubleratchet.Key(pub), num, doubleratchet.Key(mk), seq); err != nil {
				return nil, fmt.Errorf("drSessionStorage.Load: Put: %w", err)
			}
			seq++
		}
	}

	restored := &doubleratchet.State{
		Crypto:                   crypto,
		DHr:                      doubleratchet.Key(snapshot.DHrPublic),
		DHs:                      drLocalPair{priv: doubleratchet.Key(snapshot.DHsPrivate), pub: doubleratchet.Key(snapshot.DHsPublic)},
		PN:                       snapshot.PN,
		MkSkipped:                skippedStore,
		MaxSkip:                  snapshot.MaxSkip,
		MaxKeep:                  snapshot.MaxKeep,
		MaxMessageKeysPerSession: snapshot.MaxMessageKeysPerSession,
		Step:                     snapshot.Step,
		KeysCount:                snapshot.KeysCount,
	}

	// The chain structs are not settable via the State literal above; wire
	// their keys, counters and crypto provider individually.
	restored.RootCh.CK = doubleratchet.Key(snapshot.RootChCK)
	restored.RootCh.Crypto = crypto
	restored.SendCh.CK = doubleratchet.Key(snapshot.SendChCK)
	restored.SendCh.N = snapshot.SendChN
	restored.SendCh.Crypto = crypto
	restored.RecvCh.CK = doubleratchet.Key(snapshot.RecvChCK)
	restored.RecvCh.N = snapshot.RecvChN
	restored.RecvCh.Crypto = crypto
	return restored, nil
}
// GetDRSession returns an active Double Ratchet session for the peer,
// creating one if needed.
//
// Resolution order:
//  1. a previously persisted state (DrStateJson) is resumed via Load;
//  2. an initiator peer (DrInitiator with its own keypair) creates a fresh
//     session with doubleratchet.New;
//  3. a responder peer (with the contact's ratchet public key) creates one
//     with doubleratchet.NewWithRemoteKey.
//
// It returns an error when no state exists and the peer lacks the keys
// required for either bootstrap path.
func (p *Peer) GetDRSession() (doubleratchet.Session, error) {
	store := &drSessionStorage{peer: p}

	// Resume from the persisted state when one exists.
	if p.DrStateJson != "" {
		return doubleratchet.Load([]byte(p.Uid), store)
	}

	// Both bootstrap paths need the shared root key. Reject an unset one up
	// front: "" base64-decodes without error into a zero-length key, which
	// would previously create a silently broken session.
	if p.DrRootKey == "" {
		return nil, fmt.Errorf("GetDRSession: peer %s has no DR root key", p.Uid)
	}
	rootKey, err := base64.StdEncoding.DecodeString(p.DrRootKey)
	if err != nil {
		return nil, fmt.Errorf("GetDRSession: decode DrRootKey: %w", err)
	}

	// Initiator: has its own DH keypair + root key, no state yet.
	if p.DrInitiator && p.DrKpPrivate != "" {
		priv, err := base64.StdEncoding.DecodeString(p.DrKpPrivate)
		if err != nil {
			return nil, fmt.Errorf("GetDRSession: decode DrKpPrivate: %w", err)
		}
		pub, err := base64.StdEncoding.DecodeString(p.DrKpPublic)
		if err != nil {
			return nil, fmt.Errorf("GetDRSession: decode DrKpPublic: %w", err)
		}
		kp := drLocalPair{priv: doubleratchet.Key(priv), pub: doubleratchet.Key(pub)}
		return doubleratchet.New([]byte(p.Uid), doubleratchet.Key(rootKey), kp, store)
	}

	// Responder: has the remote DH public key + root key.
	if !p.DrInitiator && p.ContactDrPublicKey != "" {
		remotePub, err := base64.StdEncoding.DecodeString(p.ContactDrPublicKey)
		if err != nil {
			return nil, fmt.Errorf("GetDRSession: decode ContactDrPublicKey: %w", err)
		}
		return doubleratchet.NewWithRemoteKey([]byte(p.Uid), doubleratchet.Key(rootKey), doubleratchet.Key(remotePub), store)
	}

	return nil, fmt.Errorf("GetDRSession: peer %s has no DR keys configured", p.Uid)
}

View File

@@ -2,6 +2,7 @@ package helpers
import ( import (
"errors" "errors"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@@ -142,11 +143,21 @@ func ConsumeInboxFile(messageFilename string) ([]string, []string, string, error
return nil, nil, "ReadMessage: GetFromMyLookupKey", errors.New("no visible peer for that message") return nil, nil, "ReadMessage: GetFromMyLookupKey", errors.New("no visible peer for that message")
} }
// Unpack the message // Unpack the message
usermsg, err := peer.ProcessInboundUserMessage(packedUserMessage.Payload, packedUserMessage.Signature) usermsg, err := peer.ProcessInboundUserMessage(packedUserMessage)
if err != nil { if err != nil {
return nil, nil, "ReadMessage: ProcessInboundUserMessage", err return nil, nil, "ReadMessage: ProcessInboundUserMessage", err
} }
// Check for received or processed already filled => it's an ack for one of our sent messages
if len(usermsg.Data) == 0 && usermsg.Status != nil && usermsg.Status.Uuid != "" &&
(usermsg.Status.Received != 0 || usermsg.Status.Processed != 0) {
password, _ := client.GetConfig().GetMemPass()
if ackErr := client.UpdateMessageAck(peer, usermsg.Status.Uuid, usermsg.Status.Received, usermsg.Status.Processed, password); ackErr != nil {
logger.Warn().Err(ackErr).Str("uuid", usermsg.Status.Uuid).Msg("ConsumeInboxFile: UpdateMessageAck")
}
continue
}
//fmt.Println("From:", usermsg.From) //fmt.Println("From:", usermsg.From)
//jsonUserMessage, _ := json.Marshal(usermsg) //jsonUserMessage, _ := json.Marshal(usermsg)
//fmt.Println(string(jsonUserMessage)) //fmt.Println(string(jsonUserMessage))
@@ -172,6 +183,14 @@ func ConsumeInboxFile(messageFilename string) ([]string, []string, string, error
// user message // user message
messagesOverview = append(messagesOverview, peer.Name+" > "+string(usermsg.Data)) messagesOverview = append(messagesOverview, peer.Name+" > "+string(usermsg.Data))
// stamp the received time before storing
receivedAt := time.Now().UTC().Unix()
if usermsg.Status == nil {
usermsg.Status = &meowlib.ConversationStatus{}
}
usermsg.Status.Received = uint64(receivedAt)
// add message to storage // add message to storage
err = peer.StoreMessage(usermsg, filenames) err = peer.StoreMessage(usermsg, filenames)
if err != nil { if err != nil {
@@ -179,6 +198,20 @@ func ConsumeInboxFile(messageFilename string) ([]string, []string, string, error
} }
filenames = []string{} filenames = []string{}
// Persist peer to save updated DR state (DrStateJson)
if peer.DrRootKey != "" {
if storeErr := identity.Peers.StorePeer(peer); storeErr != nil {
logger.Warn().Err(storeErr).Str("peer", peer.Uid).Msg("ConsumeInboxFile: StorePeer (DR state)")
}
}
// Send delivery ack if the peer requested it
if peer.SendDeliveryAck && usermsg.Status.Uuid != "" {
storagePath := filepath.Join(client.GetConfig().StoragePath, identity.Uuid)
if ackErr := sendDeliveryAck(storagePath, peer, usermsg.Status.Uuid, receivedAt); ackErr != nil {
logger.Warn().Err(ackErr).Str("peer", peer.Uid).Msg("ConsumeInboxFile: sendDeliveryAck")
}
}
} }
} }
@@ -240,3 +273,45 @@ func LongPollAllServerJobs(storage_path string, jobs []client.RequestsJob, timeo
} }
} }
// sendDeliveryAck builds a delivery acknowledgment for messageUuid and enqueues
// it for sending to the peer's contact pull servers.
func sendDeliveryAck(storagePath string, peer *client.Peer, messageUuid string, receivedAt int64) error {
packedMsg, _, err := BuildReceivedMessage(messageUuid, peer.Uid, receivedAt)
if err != nil {
return fmt.Errorf("sendDeliveryAck: BuildReceivedMessage: %w", err)
}
data, err := proto.Marshal(packedMsg)
if err != nil {
return fmt.Errorf("sendDeliveryAck: proto.Marshal: %w", err)
}
outboxDir := filepath.Join(storagePath, "outbox")
if err := os.MkdirAll(outboxDir, 0700); err != nil {
return fmt.Errorf("sendDeliveryAck: MkdirAll: %w", err)
}
outboxFile := filepath.Join(outboxDir, "ack_"+uuid.New().String())
if err := os.WriteFile(outboxFile, data, 0600); err != nil {
return fmt.Errorf("sendDeliveryAck: WriteFile: %w", err)
}
var servers []client.Server
for _, srvUid := range peer.ContactPullServers {
srv, loadErr := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvUid)
if loadErr == nil && srv != nil {
servers = append(servers, *srv)
}
}
if len(servers) == 0 {
os.Remove(outboxFile)
return errors.New("sendDeliveryAck: no contact servers found")
}
return client.PushSendJob(storagePath, &client.SendJob{
Queue: peer.Uid,
File: outboxFile,
Servers: servers,
})
}

View File

@@ -2,6 +2,7 @@ package helpers
import ( import (
"errors" "errors"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
@@ -9,30 +10,60 @@ import (
"forge.redroom.link/yves/meowlib" "forge.redroom.link/yves/meowlib"
"forge.redroom.link/yves/meowlib/client" "forge.redroom.link/yves/meowlib/client"
"google.golang.org/protobuf/proto"
) )
const maxRetriesPerServer = 3 const maxRetriesPerServer = 3
const defaultSendTimeout = 3600 * 24 // seconds, used when job.Timeout is 0 const defaultSendTimeout = 3600 * 24 // seconds, used when job.Timeout is 0
const defaultPostTimeout = 200
// WriteSendJob enqueues a SendJob from the main Flutter isolate. // CreateUserMessageAndSendJob is the single entry point for sending a message.
// It is a thin wrapper over client.PushSendJob and is safe to call // It creates and stores the user message, serialises the packed form to
// concurrently with ProcessSendQueues. // storagePath/outbox/{dbFile}_{dbId}, and enqueues a SendJob in
func WriteSendJob(storagePath string, job *client.SendJob) error { // storagePath/queues/{peerUid}.
return client.PushSendJob(storagePath, job) func CreateUserMessageAndSendJob(storagePath, message, peerUid, replyToUid string, filelist []string, servers []client.Server, timeout int) error {
packedMsg, dbFile, dbId, errTxt, err := CreateAndStoreUserMessage(message, peerUid, replyToUid, filelist)
if err != nil {
return fmt.Errorf("%s: %w", errTxt, err)
}
data, err := proto.Marshal(packedMsg)
if err != nil {
return fmt.Errorf("CreateUserMessageAndSendJob: proto.Marshal: %w", err)
}
outboxDir := filepath.Join(storagePath, "outbox")
if err := os.MkdirAll(outboxDir, 0700); err != nil {
return fmt.Errorf("CreateUserMessageAndSendJob: MkdirAll: %w", err)
}
outboxFile := filepath.Join(outboxDir, fmt.Sprintf("%s_%d", dbFile, dbId))
if err := os.WriteFile(outboxFile, data, 0600); err != nil {
return fmt.Errorf("CreateUserMessageAndSendJob: WriteFile: %w", err)
}
return client.PushSendJob(storagePath, &client.SendJob{
Queue: peerUid,
File: outboxFile,
Servers: servers,
Timeout: timeout,
})
} }
// ProcessSendQueues discovers every queue DB file under storagePath/queues/ // ProcessSendQueues discovers every queue DB file under storagePath/queues/
// and processes each queue concurrently in its own goroutine. // and processes each queue concurrently in its own goroutine.
// Call this from the send isolate on wake-up notification or on a periodic timer. // Call this from the send isolate on wake-up notification or on a periodic timer.
func ProcessSendQueues(storagePath string) { // It returns the total number of successfully sent messages across all queues.
func ProcessSendQueues(storagePath string) int {
queueDir := filepath.Join(storagePath, "queues") queueDir := filepath.Join(storagePath, "queues")
entries, err := os.ReadDir(queueDir) entries, err := os.ReadDir(queueDir)
if err != nil { if err != nil {
logger.Warn().Err(err).Str("dir", queueDir).Msg("ProcessSendQueues: ReadDir") logger.Warn().Err(err).Str("dir", queueDir).Msg("ProcessSendQueues: ReadDir")
return return 0
} }
var wg sync.WaitGroup var wg sync.WaitGroup
counts := make(chan int, len(entries))
for _, entry := range entries { for _, entry := range entries {
if entry.IsDir() { if entry.IsDir() {
continue continue
@@ -41,32 +72,50 @@ func ProcessSendQueues(storagePath string) {
queue := entry.Name() queue := entry.Name()
go func(q string) { go func(q string) {
defer wg.Done() defer wg.Done()
processSendQueue(storagePath, q) counts <- processSendQueue(storagePath, q)
}(queue) }(queue)
} }
wg.Wait() wg.Wait()
close(counts)
total := 0
for n := range counts {
total += n
}
return total
} }
// processSendQueue processes pending jobs for a single named queue sequentially. // processSendQueue processes pending jobs for a single named queue sequentially.
// It returns the number of successfully sent messages.
// //
// For each pending job it will: // For each pending job it will:
// - immediately mark it failed if its timeout has elapsed // - immediately mark it failed if its TTL (job.Timeout) has elapsed this is the
// only criterion for permanent failure; retry exhaustion is never a failure cause
// - attempt delivery, cycling through servers until one succeeds // - attempt delivery, cycling through servers until one succeeds
// - mark it sent on success or failed when all servers are exhausted // - mark it sent on success
// - stop and return when a job still has retries left (will resume on next call) // - stop and return when all servers fail this run (will resume on next call)
func processSendQueue(storagePath, queue string) { //
// Per-server retry counts (maxRetriesPerServer) are local to each call so that
// past failures in previous runs never prevent future delivery attempts.
func processSendQueue(storagePath, queue string) int {
sent := 0
for { for {
job, _, err := client.PeekSendJob(storagePath, queue) job, _, err := client.PeekSendJob(storagePath, queue)
if err != nil { if err != nil {
logger.Error().Err(err).Str("queue", queue).Msg("processSendQueue: PeekSendJob") logger.Error().Err(err).Str("queue", queue).Msg("processSendQueue: PeekSendJob")
return return sent
} }
if job == nil { if job == nil {
return // no more pending jobs return sent // no more pending jobs
} }
// Hard timeout: job has been sitting too long // Hard timeout: the only criterion for permanent failure.
if job.Timeout > 0 && time.Since(job.InsertedAt) > time.Duration(job.Timeout)*time.Second { // Use defaultSendTimeout when the job carries no explicit TTL.
ttl := job.Timeout
if ttl <= 0 {
ttl = defaultSendTimeout
}
if time.Since(job.InsertedAt) > time.Duration(ttl)*time.Second {
job.Status = client.SendStatusFailed job.Status = client.SendStatusFailed
if err := client.UpdateSendJob(storagePath, queue, job); err != nil { if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob timeout") logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob timeout")
@@ -74,82 +123,68 @@ func processSendQueue(storagePath, queue string) {
continue // try the next pending job continue // try the next pending job
} }
serverIdx, sendErr := attemptSendJob(job) // runRetries is allocated fresh every call so it never accumulates
// across processSendQueue invocations.
runRetries := make([]int, len(job.Servers))
serverIdx, sendErr := attemptSendJob(job, runRetries)
if sendErr == nil { if sendErr == nil {
now := time.Now() now := time.Now().UTC()
job.Status = client.SendStatusSent job.Status = client.SendStatusSent
job.SentAt = &now job.SentAt = &now
job.SuccessfulServer = &serverIdx job.SuccessfulServer = &serverIdx
if err := client.UpdateSendJob(storagePath, queue, job); err != nil { if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob sent") logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob sent")
} }
sent++
continue // job delivered look for the next one continue // job delivered look for the next one
} }
// Persist updated retry counts regardless of outcome // All servers failed this run; stop and wait for the next poll.
if err := client.UpdateSendJob(storagePath, queue, job); err != nil { // Permanent failure is decided solely by the TTL check above.
logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob retries") return sent
}
if allServersExhausted(job) {
job.Status = client.SendStatusFailed
if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob failed")
}
continue // all servers dead for this job try the next one
}
// Job still has remaining retries on some server; stop and wait for the next poll
return
} }
} }
// attemptSendJob reads the pre-built packed message from job.File and tries // attemptSendJob reads the pre-built packed message from job.File and tries
// each server in order, skipping any server that has already reached // each server in order, skipping any server that has already reached
// maxRetriesPerServer failures. // maxRetriesPerServer failures within the current run.
// On the first successful POST it returns the server index. // On the first successful POST it returns the server index.
// All retry counts are incremented in-place inside job.Retries. // Retry counts are tracked in the caller-supplied retries slice (run-local,
func attemptSendJob(job *client.SendJob) (int, error) { // never persisted) so that previous runs do not influence this attempt.
func attemptSendJob(job *client.SendJob, retries []int) (int, error) {
data, err := os.ReadFile(job.File) data, err := os.ReadFile(job.File)
if err != nil { if err != nil {
return -1, err return -1, err
} }
// Ensure the retries slice is aligned with the servers slice.
// Ensure the retries slice is aligned with the servers slice for len(retries) < len(job.Servers) {
for len(job.Retries) < len(job.Servers) { retries = append(retries, 0)
job.Retries = append(job.Retries, 0)
}
timeout := job.Timeout
if timeout <= 0 {
timeout = defaultSendTimeout
} }
for i, srv := range job.Servers { for i, srv := range job.Servers {
if job.Retries[i] >= maxRetriesPerServer { if retries[i] >= maxRetriesPerServer {
continue // this server is exhausted continue // this server is exhausted for the current run
} }
_, err := meowlib.HttpPostMessage(srv.Url, data, timeout)
// Unmarshal the stored PackedUserMessage and wrap it for this server.
packedUsrMsg := &meowlib.PackedUserMessage{}
if err := proto.Unmarshal(data, packedUsrMsg); err != nil {
return -1, err
}
serverData, errTxt, packErr := PackMessageForServer(packedUsrMsg, srv.GetUid())
if packErr != nil {
logger.Error().Err(packErr).Str("errTxt", errTxt).Str("url", srv.Url).Msg("attemptSendJob: PackMessageForServer")
retries[i]++
continue
}
_, err = meowlib.HttpPostMessage(srv.Url, serverData, defaultPostTimeout)
if err != nil { if err != nil {
logger.Warn().Err(err).Str("url", srv.Url).Int("retry", job.Retries[i]+1).Msg("attemptSendJob: POST failed") logger.Warn().Err(err).Str("url", srv.Url).Int("retry", retries[i]+1).Msg("attemptSendJob: POST failed")
job.Retries[i]++ retries[i]++
continue continue
} }
return i, nil return i, nil
} }
return -1, errors.New("all servers failed or exhausted") return -1, errors.New("all servers failed or exhausted")
} }
// allServersExhausted returns true when every server in the job has been tried
// maxRetriesPerServer times without success.
func allServersExhausted(job *client.SendJob) bool {
if len(job.Servers) == 0 {
return true
}
for i := range job.Servers {
if i >= len(job.Retries) || job.Retries[i] < maxRetriesPerServer {
return false
}
}
return true
}

View File

@@ -6,13 +6,17 @@ import (
"net/http/httptest" "net/http/httptest"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"strings"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
"forge.redroom.link/yves/meowlib"
"forge.redroom.link/yves/meowlib/client" "forge.redroom.link/yves/meowlib/client"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
_ "github.com/mattn/go-sqlite3" _ "github.com/mattn/go-sqlite3"
) )
@@ -37,14 +41,35 @@ func closedServerURL(t *testing.T) string {
return srv.URL return srv.URL
} }
// writeMsgFile writes dummy bytes to a temp file and returns the path. // writeMsgFile writes a valid serialised empty PackedUserMessage to a temp file
// and returns the path. The file content satisfies proto.Unmarshal inside
// attemptSendJob; the httptest endpoints ignore the encrypted payload.
func writeMsgFile(t *testing.T, dir, name string) string { func writeMsgFile(t *testing.T, dir, name string) string {
t.Helper() t.Helper()
p := filepath.Join(dir, name) p := filepath.Join(dir, name)
require.NoError(t, os.WriteFile(p, []byte("packed-server-message"), 0600)) data, err := proto.Marshal(&meowlib.PackedUserMessage{})
require.NoError(t, err)
require.NoError(t, os.WriteFile(p, data, 0600))
return p return p
} }
// newTestServer creates a client.Server for the given URL, generates a
// throwaway keypair so that AsymEncryptMessage succeeds, and stores the server
// in the current identity's MessageServers so that PackMessageForServer can
// look it up via LoadServer. Returns the registered server.
//
// Call setupMsgHelperConfig before this so an identity is in place.
func newTestServer(t *testing.T, url string) client.Server {
t.Helper()
srv, err := client.CreateServerFromUrl(url)
require.NoError(t, err)
kp, err := meowlib.NewKeyPair()
require.NoError(t, err)
srv.PublicKey = kp.Public
require.NoError(t, client.GetConfig().GetIdentity().MessageServers.StoreServer(srv))
return *srv
}
// pushJob is a convenience wrapper around client.PushSendJob. // pushJob is a convenience wrapper around client.PushSendJob.
func pushJob(t *testing.T, dir, queue, file string, servers []client.Server, timeout int) { func pushJob(t *testing.T, dir, queue, file string, servers []client.Server, timeout int) {
t.Helper() t.Helper()
@@ -67,50 +92,23 @@ func serverSlice(urls ...string) []client.Server {
// --- unit tests --------------------------------------------------------- // --- unit tests ---------------------------------------------------------
func TestAllServersExhausted_NoServers(t *testing.T) {
job := &client.SendJob{}
assert.True(t, allServersExhausted(job))
}
func TestAllServersExhausted_NoneExhausted(t *testing.T) {
job := &client.SendJob{
Servers: serverSlice("http://s1", "http://s2"),
Retries: []int{0, 0},
}
assert.False(t, allServersExhausted(job))
}
func TestAllServersExhausted_PartiallyExhausted(t *testing.T) {
job := &client.SendJob{
Servers: serverSlice("http://s1", "http://s2"),
Retries: []int{maxRetriesPerServer, 0},
}
assert.False(t, allServersExhausted(job))
}
func TestAllServersExhausted_AllExhausted(t *testing.T) {
job := &client.SendJob{
Servers: serverSlice("http://s1", "http://s2"),
Retries: []int{maxRetriesPerServer, maxRetriesPerServer},
}
assert.True(t, allServersExhausted(job))
}
// TestAttemptSendJob_Success verifies a successful POST to the first server. // TestAttemptSendJob_Success verifies a successful POST to the first server.
func TestAttemptSendJob_Success(t *testing.T) { func TestAttemptSendJob_Success(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
srv := acceptServer(t, &received) srv := acceptServer(t, &received)
defer srv.Close() defer srv.Close()
newTestServer(t, srv.URL)
job := &client.SendJob{ job := &client.SendJob{
File: writeMsgFile(t, dir, "msg"), File: writeMsgFile(t, dir, "msg"),
Servers: serverSlice(srv.URL), Servers: serverSlice(srv.URL),
Timeout: 5, Timeout: 5,
Retries: []int{0},
} }
retries := make([]int, len(job.Servers))
idx, err := attemptSendJob(job) idx, err := attemptSendJob(job, retries)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 0, idx) assert.Equal(t, 0, idx)
assert.Equal(t, int64(1), atomic.LoadInt64(&received)) assert.Equal(t, int64(1), atomic.LoadInt64(&received))
@@ -119,62 +117,74 @@ func TestAttemptSendJob_Success(t *testing.T) {
// TestAttemptSendJob_Fallback verifies that when the first server refuses the // TestAttemptSendJob_Fallback verifies that when the first server refuses the
// connection, the second server is tried and succeeds. // connection, the second server is tried and succeeds.
func TestAttemptSendJob_Fallback(t *testing.T) { func TestAttemptSendJob_Fallback(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
good := acceptServer(t, &received) good := acceptServer(t, &received)
defer good.Close() defer good.Close()
deadURL := closedServerURL(t)
newTestServer(t, deadURL)
newTestServer(t, good.URL)
job := &client.SendJob{ job := &client.SendJob{
File: writeMsgFile(t, dir, "msg"), File: writeMsgFile(t, dir, "msg"),
Servers: serverSlice(closedServerURL(t), good.URL), Servers: serverSlice(deadURL, good.URL),
Timeout: 5, Timeout: 5,
Retries: []int{0, 0},
} }
retries := make([]int, len(job.Servers))
idx, err := attemptSendJob(job) idx, err := attemptSendJob(job, retries)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 1, idx, "second server should have been used") assert.Equal(t, 1, idx, "second server should have been used")
assert.Equal(t, int64(1), atomic.LoadInt64(&received)) assert.Equal(t, int64(1), atomic.LoadInt64(&received))
assert.Equal(t, 1, job.Retries[0], "first server retry should be incremented") assert.Equal(t, 1, retries[0], "first server retry should be incremented")
assert.Equal(t, 0, job.Retries[1], "second server retry must stay at zero") assert.Equal(t, 0, retries[1], "second server retry must stay at zero")
} }
// TestAttemptSendJob_AllFail verifies that all retry counts are incremented // TestAttemptSendJob_AllFail verifies that all retry counts are incremented
// and an error is returned when every server refuses connections. // and an error is returned when every server refuses connections.
func TestAttemptSendJob_AllFail(t *testing.T) { func TestAttemptSendJob_AllFail(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
dead1 := closedServerURL(t)
dead2 := closedServerURL(t)
newTestServer(t, dead1)
newTestServer(t, dead2)
job := &client.SendJob{ job := &client.SendJob{
File: writeMsgFile(t, dir, "msg"), File: writeMsgFile(t, dir, "msg"),
Servers: serverSlice(closedServerURL(t), closedServerURL(t)), Servers: serverSlice(dead1, dead2),
Timeout: 5, Timeout: 5,
Retries: []int{0, 0},
} }
retries := make([]int, len(job.Servers))
_, err := attemptSendJob(job) _, err := attemptSendJob(job, retries)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, 1, job.Retries[0]) assert.Equal(t, 1, retries[0])
assert.Equal(t, 1, job.Retries[1]) assert.Equal(t, 1, retries[1])
} }
// TestAttemptSendJob_SkipsExhaustedServer verifies that a server already at // TestAttemptSendJob_SkipsExhaustedServer verifies that a server already at
// maxRetriesPerServer is not contacted. // maxRetriesPerServer is not contacted.
func TestAttemptSendJob_SkipsExhaustedServer(t *testing.T) { func TestAttemptSendJob_SkipsExhaustedServer(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
good := acceptServer(t, &received) good := acceptServer(t, &received)
defer good.Close() defer good.Close()
deadURL := closedServerURL(t)
newTestServer(t, good.URL) // only good server needs to be reachable
job := &client.SendJob{ job := &client.SendJob{
File: writeMsgFile(t, dir, "msg"), File: writeMsgFile(t, dir, "msg"),
Servers: serverSlice( Servers: serverSlice(
closedServerURL(t), // exhausted must be skipped deadURL, // exhausted must be skipped (no need to store in identity)
good.URL, good.URL,
), ),
Timeout: 5, Timeout: 5,
Retries: []int{maxRetriesPerServer, 0},
} }
retries := []int{maxRetriesPerServer, 0} // first server already exhausted this run
idx, err := attemptSendJob(job) idx, err := attemptSendJob(job, retries)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 1, idx) assert.Equal(t, 1, idx)
assert.Equal(t, int64(1), atomic.LoadInt64(&received)) assert.Equal(t, int64(1), atomic.LoadInt64(&received))
@@ -182,30 +192,56 @@ func TestAttemptSendJob_SkipsExhaustedServer(t *testing.T) {
// --- integration tests -------------------------------------------------- // --- integration tests --------------------------------------------------
// TestWriteSendJob verifies the thin WriteSendJob wrapper enqueues the job. // TestCreateUserMessageAndSendJob verifies that the packed message is written to
func TestWriteSendJob(t *testing.T) { // outbox/{dbFile}_{dbId} and a pending send job is enqueued for the peer.
dir := t.TempDir() func TestCreateUserMessageAndSendJob(t *testing.T) {
err := WriteSendJob(dir, &client.SendJob{ dir, id := setupMsgHelperConfig(t)
Queue: "q1",
File: "/tmp/f", peer := newFullyKeyedPeer(t, "peer-create-send")
Servers: serverSlice("http://s1"), require.NoError(t, id.Peers.StorePeer(peer))
})
srv := newTestServer(t, "http://test-srv.example")
err := CreateUserMessageAndSendJob(
dir,
"hello from integration",
"peer-create-send",
"",
nil,
[]client.Server{srv},
60,
)
require.NoError(t, err) require.NoError(t, err)
got, _, err := client.PeekSendJob(dir, "q1") // A pending job must be in the queue.
job, _, err := client.PeekSendJob(dir, "peer-create-send")
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, got) require.NotNil(t, job, "a send job must be enqueued")
assert.Equal(t, "/tmp/f", got.File)
// The outbox file must exist under storagePath/outbox/.
assert.FileExists(t, job.File)
assert.True(t, strings.HasPrefix(job.File, filepath.Join(dir, "outbox")),
"outbox file must be under storagePath/outbox/")
// The basename must follow the {dbFile}_{dbId} naming convention.
base := filepath.Base(job.File)
sep := strings.LastIndex(base, "_")
require.Greater(t, sep, 0, "filename must contain an underscore separating dbFile from dbId")
dbId, parseErr := strconv.ParseInt(base[sep+1:], 10, 64)
assert.NoError(t, parseErr, "suffix after underscore must be a numeric db ID")
assert.Greater(t, dbId, int64(0), "db ID must be positive")
} }
// TestProcessSendQueues_Success verifies that a pending job is delivered and // TestProcessSendQueues_Success verifies that a pending job is delivered and
// marked as sent when the server accepts it. // marked as sent when the server accepts it.
func TestProcessSendQueues_Success(t *testing.T) { func TestProcessSendQueues_Success(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
srv := acceptServer(t, &received) srv := acceptServer(t, &received)
defer srv.Close() defer srv.Close()
newTestServer(t, srv.URL)
msgPath := writeMsgFile(t, dir, "msg") msgPath := writeMsgFile(t, dir, "msg")
pushJob(t, dir, "q1", msgPath, serverSlice(srv.URL), 10) pushJob(t, dir, "q1", msgPath, serverSlice(srv.URL), 10)
@@ -229,13 +265,17 @@ func TestProcessSendQueues_Success(t *testing.T) {
// TestProcessSendQueues_ServerFallback verifies that when the first server is // TestProcessSendQueues_ServerFallback verifies that when the first server is
// unreachable, the second server is tried successfully in the same pass. // unreachable, the second server is tried successfully in the same pass.
func TestProcessSendQueues_ServerFallback(t *testing.T) { func TestProcessSendQueues_ServerFallback(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
good := acceptServer(t, &received) good := acceptServer(t, &received)
defer good.Close() defer good.Close()
deadURL := closedServerURL(t)
newTestServer(t, deadURL)
newTestServer(t, good.URL)
msgPath := writeMsgFile(t, dir, "msg") msgPath := writeMsgFile(t, dir, "msg")
pushJob(t, dir, "q1", msgPath, serverSlice(closedServerURL(t), good.URL), 10) pushJob(t, dir, "q1", msgPath, serverSlice(deadURL, good.URL), 10)
_, id, err := client.PeekSendJob(dir, "q1") _, id, err := client.PeekSendJob(dir, "q1")
require.NoError(t, err) require.NoError(t, err)
@@ -252,39 +292,43 @@ func TestProcessSendQueues_ServerFallback(t *testing.T) {
assert.Equal(t, 1, *job.SuccessfulServer, "second server should be recorded as successful") assert.Equal(t, 1, *job.SuccessfulServer, "second server should be recorded as successful")
} }
// TestProcessSendQueues_AllServersExhausted verifies that after maxRetriesPerServer // TestProcessSendQueues_FailedRunsStayPending verifies that repeated delivery
// failed attempts per server the job is marked as failed. // failures do NOT mark a job as permanently failed. Only a TTL timeout can do
func TestProcessSendQueues_AllServersExhausted(t *testing.T) { // that; retry exhaustion merely stops the current run.
dir := t.TempDir() func TestProcessSendQueues_FailedRunsStayPending(t *testing.T) {
dir, _ := setupMsgHelperConfig(t)
deadURL := closedServerURL(t) deadURL := closedServerURL(t)
newTestServer(t, deadURL)
msgPath := writeMsgFile(t, dir, "msg") msgPath := writeMsgFile(t, dir, "msg")
// timeout=0 → uses defaultSendTimeout (24 h), so the job won't expire here.
pushJob(t, dir, "q1", msgPath, serverSlice(deadURL), 0) pushJob(t, dir, "q1", msgPath, serverSlice(deadURL), 0)
_, id, err := client.PeekSendJob(dir, "q1") _, id, err := client.PeekSendJob(dir, "q1")
require.NoError(t, err) require.NoError(t, err)
// Each call to ProcessSendQueues increments the retry counter by 1. // Run several times per-server retry counts reset each run, so the job
// After maxRetriesPerServer calls, all servers are exhausted → failed. // must remain pending no matter how many runs fail.
for i := 0; i < maxRetriesPerServer; i++ { for i := 0; i < maxRetriesPerServer+2; i++ {
ProcessSendQueues(dir) ProcessSendQueues(dir)
} }
job, err := client.GetSendJob(dir, "q1", id) job, err := client.GetSendJob(dir, "q1", id)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, job) require.NotNil(t, job)
assert.Equal(t, client.SendStatusFailed, job.Status) assert.Equal(t, client.SendStatusPending, job.Status, "repeated failures must not cause permanent failure only timeout does")
assert.Equal(t, maxRetriesPerServer, job.Retries[0])
} }
// TestProcessSendQueues_JobTimeout verifies that a job whose timeout has elapsed // TestProcessSendQueues_JobTimeout verifies that a job whose timeout has elapsed
// is immediately marked as failed without any send attempt. // is immediately marked as failed without any send attempt.
func TestProcessSendQueues_JobTimeout(t *testing.T) { func TestProcessSendQueues_JobTimeout(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
srv := acceptServer(t, &received) srv := acceptServer(t, &received)
defer srv.Close() defer srv.Close()
newTestServer(t, srv.URL)
msgPath := writeMsgFile(t, dir, "msg") msgPath := writeMsgFile(t, dir, "msg")
// Timeout of 1 second; we will backdate inserted_at so the job looks expired. // Timeout of 1 second; we will backdate inserted_at so the job looks expired.
pushJob(t, dir, "q1", msgPath, serverSlice(srv.URL), 1) pushJob(t, dir, "q1", msgPath, serverSlice(srv.URL), 1)
@@ -309,17 +353,25 @@ func TestProcessSendQueues_JobTimeout(t *testing.T) {
// TestProcessSendQueues_MultipleQueues verifies that jobs in different queue // TestProcessSendQueues_MultipleQueues verifies that jobs in different queue
// files are processed concurrently and independently. // files are processed concurrently and independently.
func TestProcessSendQueues_MultipleQueues(t *testing.T) { func TestProcessSendQueues_MultipleQueues(t *testing.T) {
dir := t.TempDir() dir, _ := setupMsgHelperConfig(t)
var received int64 var received int64
srv := acceptServer(t, &received) srv := acceptServer(t, &received)
defer srv.Close() defer srv.Close()
newTestServer(t, srv.URL)
for _, q := range []string{"qa", "qb", "qc"} { for _, q := range []string{"qa", "qb", "qc"} {
msgPath := writeMsgFile(t, dir, "msg_"+q) msgPath := writeMsgFile(t, dir, "msg_"+q)
pushJob(t, dir, q, msgPath, serverSlice(srv.URL), 10) pushJob(t, dir, q, msgPath, serverSlice(srv.URL), 10)
} }
// Concurrent goroutines for each queue all try to open the same BadgerDB for
// server lookup; only one can hold the lock at a time. Jobs that lose the
// race stay pending and are retried on the next call. Three passes guarantee
// every queue gets at least one uncontested turn.
for i := 0; i < 3; i++ {
ProcessSendQueues(dir) ProcessSendQueues(dir)
}
assert.Equal(t, int64(3), atomic.LoadInt64(&received), "all three queues should have delivered their message") assert.Equal(t, int64(3), atomic.LoadInt64(&received), "all three queues should have delivered their message")
} }

View File

@@ -36,6 +36,7 @@ func ReadCallRequestResponseMessage(data []byte, srvuid string) (*meowlib.VideoD
return serverMsg.VideoData, "", nil return serverMsg.VideoData, "", nil
} }
/*
func BuildCallMessage(videodata *meowlib.VideoData, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) { func BuildCallMessage(videodata *meowlib.VideoData, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) {
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid) peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
@@ -53,3 +54,4 @@ func BuildCallMessage(videodata *meowlib.VideoData, srvuid string, peer_uid stri
func BuildCancelCallMessage() { func BuildCancelCallMessage() {
} }
*/

View File

@@ -28,7 +28,7 @@ func invitationGetAnswerReadResponse(invitation *meowlib.Invitation) (*client.Pe
if peer != nil { if peer != nil {
// process the packed user message // process the packed user message
usermsg, err := peer.ProcessInboundUserMessage(invitationAnswer.Payload, invitationAnswer.Signature) usermsg, err := peer.ProcessInboundUserMessage(&invitationAnswer)
if err != nil { if err != nil {
return nil, "InvitationGetAnswerReadResponse: ProcessInboundUserMessage", err return nil, "InvitationGetAnswerReadResponse: ProcessInboundUserMessage", err
} }

View File

@@ -1,86 +1,288 @@
package helpers package helpers
import ( import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"forge.redroom.link/yves/meowlib" "forge.redroom.link/yves/meowlib"
"forge.redroom.link/yves/meowlib/client" "forge.redroom.link/yves/meowlib/client"
"github.com/google/uuid"
"google.golang.org/protobuf/proto"
) )
func messageBuildPackAndStore(msg *meowlib.UserMessage, srvuid string, peer *client.Peer) ([]byte, string, error) { func PackMessageForServer(packedMsg *meowlib.PackedUserMessage, srvuid string) ([]byte, string, error) {
// Get the message server // Get the message server
srv, err := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvuid) srv, err := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvuid)
if err != nil { if err != nil {
return nil, "messageBuildPostprocess : LoadServer", err return nil, "PackMessageForServer : LoadServer", err
} }
// Prepare cyphered + packed user message // Fetch and persist the server public key if it was never stored
packedMsg, err := peer.ProcessOutboundUserMessage(msg) // (servers added via invitation finalization only have a UserKp, no PublicKey)
if srv.PublicKey == "" {
srvdata, err := meowlib.HttpGetId(srv.Url)
if err != nil { if err != nil {
return nil, "messageBuildPostprocess : ProcessOutboundUserMessage", err return nil, "PackMessageForServer : HttpGetId", err
}
srv.PublicKey = srvdata["publicKey"]
client.GetConfig().GetIdentity().MessageServers.StoreServer(srv)
} }
// Creating Server message for transporting the user message // Creating Server message for transporting the user message
toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg) toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg)
data, err := srv.ProcessOutboundMessage(toServerMessage) data, err := srv.ProcessOutboundMessage(toServerMessage)
if err != nil { if err != nil {
return nil, "messageBuildPostprocess : ProcessOutboundMessage", err return nil, "PackMessageForServer : ProcessOutboundMessage", err
}
// Store message
err = peer.StoreMessage(msg, nil)
if err != nil {
return nil, "messageBuildPostprocess : StoreMessage", err
} }
return data, "", nil return data, "", nil
} }
func CreateUserMessage(message string, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) { func CreateStorePackUserMessageForServer(message string, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) {
usermessage, _, _, errtxt, err := CreateAndStoreUserMessage(message, peer_uid, replyToUid, filelist)
if err != nil {
return nil, errtxt, err
}
return PackMessageForServer(usermessage, srvuid)
}
// CreateAndStoreUserMessage creates, signs, and stores an outbound message for
// peer_uid. It returns the packed (encrypted) form ready for server transport,
// the peer DB file UUID (dbFile), the SQLite row ID (dbId), an error context
// string, and any error.
func CreateAndStoreUserMessage(message string, peer_uid string, replyToUid string, filelist []string) (*meowlib.PackedUserMessage, string, int64, string, error) {
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid) peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
// Creating User message // Creating User message
usermessage, err := peer.BuildSimpleUserMessage([]byte(message)) usermessage, err := peer.BuildSimpleUserMessage([]byte(message))
if err != nil { if err != nil {
return nil, "PrepareServerMessage : BuildSimpleUserMessage", err return nil, "", 0, "PrepareServerMessage : BuildSimpleUserMessage", err
} }
for _, file := range filelist { for _, file := range filelist {
err = usermessage.AddFile(file, client.GetConfig().Chunksize) err = usermessage.AddFile(file, client.GetConfig().Chunksize)
if err != nil { if err != nil {
return nil, "PrepareServerMessage : AddFile", err return nil, "", 0, "PrepareServerMessage : AddFile", err
} }
} }
usermessage.Status.AnswerToUuid = replyToUid usermessage.Status.Sent = uint64(time.Now().UTC().Unix())
usermessage.Status.ReplyToUuid = replyToUid
return messageBuildPackAndStore(usermessage, srvuid, peer) // Store message
err = peer.StoreMessage(usermessage, nil)
if err != nil {
return nil, "", 0, "messageBuildPostprocess : StoreMessage", err
}
dbFile := peer.LastMessage.Dbfile
dbId := peer.LastMessage.Dbid
// Prepare cyphered + packed user message
packedMsg, err := peer.ProcessOutboundUserMessage(usermessage)
if err != nil {
return nil, "", 0, "messageBuildPostprocess : ProcessOutboundUserMessage", err
}
// Persist peer to save updated DR state (DrStateJson)
if peer.DrRootKey != "" {
if storeErr := client.GetConfig().GetIdentity().Peers.StorePeer(peer); storeErr != nil {
logger.Warn().Err(storeErr).Str("peer", peer.Uid).Msg("messageBuildPostprocess: StorePeer (DR state)")
}
}
return packedMsg, dbFile, dbId, "", nil
} }
func BuildAckMessage(messageUid string, srvuid string, peer_uid string, received int64, processed int64) ([]byte, string, error) { func BuildReceivedMessage(messageUid string, peer_uid string, received int64) (*meowlib.PackedUserMessage, string, error) {
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid) peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
srv, err := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvuid)
if err != nil {
return nil, "PrepareServerMessage : LoadServer", err
}
// Creating User message // Creating User message
usermessage, err := peer.BuildSimpleUserMessage(nil) usermessage, err := peer.BuildSimpleUserMessage(nil)
if err != nil { if err != nil {
return nil, "PrepareServerMessage : BuildSimpleUserMessage", err return nil, "BuildReceivedMessage : BuildSimpleUserMessage", err
} }
usermessage.Status.Uuid = messageUid usermessage.Status.Uuid = messageUid
usermessage.Status.Received = uint64(received) usermessage.Status.Received = uint64(received)
// Prepare cyphered + packed user message
packedMsg, err := peer.ProcessOutboundUserMessage(usermessage)
if err != nil {
return nil, "BuildReceivedMessage : ProcessOutboundUserMessage", err
}
// Persist peer to save updated DR state (DrStateJson)
if peer.DrRootKey != "" {
client.GetConfig().GetIdentity().Peers.StorePeer(peer)
}
return packedMsg, "", nil
}
func BuildProcessedMessage(messageUid string, peer_uid string, processed int64) (*meowlib.PackedUserMessage, string, error) {
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
// Creating User message
usermessage, err := peer.BuildSimpleUserMessage(nil)
if err != nil {
return nil, "BuildProcessedMessage : BuildSimpleUserMessage", err
}
usermessage.Status.Uuid = messageUid
usermessage.Status.Processed = uint64(processed) usermessage.Status.Processed = uint64(processed)
// Prepare cyphered + packed user message // Prepare cyphered + packed user message
packedMsg, err := peer.ProcessOutboundUserMessage(usermessage) packedMsg, err := peer.ProcessOutboundUserMessage(usermessage)
if err != nil { if err != nil {
return nil, "PrepareServerMessage : ProcessOutboundUserMessage", err return nil, "BuildProcessedMessage : ProcessOutboundUserMessage", err
} }
// Creating Server message for transporting the user message // Persist peer to save updated DR state (DrStateJson)
toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg) if peer.DrRootKey != "" {
data, err := srv.ProcessOutboundMessage(toServerMessage) client.GetConfig().GetIdentity().Peers.StorePeer(peer)
if err != nil {
return nil, "PrepareServerMessage : ProcessOutboundMessage", err
} }
return packedMsg, "", nil
return data, "", nil
} }
func ReadAckMessageResponse() { func ReadAckMessageResponse() {
//! update the status in message store //! update the status in message store
} }
// MarkMessageProcessed stamps the stored message with a processed timestamp of
// now(), persists the updated record, and — if the peer has SendProcessingAck
// enabled and the message carries a UUID — enqueues a processed acknowledgment
// to the peer's contact pull servers.
func MarkMessageProcessed(peerUid string, dbFile string, dbId int64) error {
password, _ := client.GetConfig().GetMemPass()
processedAt := time.Now().UTC().Unix()
dbm, err := client.GetDbMessage(dbFile, dbId, password)
if err != nil {
return fmt.Errorf("MarkMessageProcessed: GetDbMessage: %w", err)
}
if dbm.Status == nil {
dbm.Status = &meowlib.ConversationStatus{}
}
dbm.Status.Processed = uint64(processedAt)
if err := client.UpdateDbMessage(dbm, dbFile, dbId, password); err != nil {
return fmt.Errorf("MarkMessageProcessed: UpdateDbMessage: %w", err)
}
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peerUid)
if peer == nil || !peer.SendProcessingAck || dbm.Status.Uuid == "" {
return nil
}
identity := client.GetConfig().GetIdentity()
storagePath := filepath.Join(client.GetConfig().StoragePath, identity.Uuid)
return sendProcessingAck(storagePath, peer, dbm.Status.Uuid, processedAt)
}
// sendProcessingAck builds a processing acknowledgment for messageUuid and
// enqueues it for sending to the peer's contact pull servers.
func sendProcessingAck(storagePath string, peer *client.Peer, messageUuid string, processedAt int64) error {
packedMsg, _, err := BuildProcessedMessage(messageUuid, peer.Uid, processedAt)
if err != nil {
return fmt.Errorf("sendProcessingAck: BuildProcessedMessage: %w", err)
}
data, err := proto.Marshal(packedMsg)
if err != nil {
return fmt.Errorf("sendProcessingAck: proto.Marshal: %w", err)
}
outboxDir := filepath.Join(storagePath, "outbox")
if err := os.MkdirAll(outboxDir, 0700); err != nil {
return fmt.Errorf("sendProcessingAck: MkdirAll: %w", err)
}
outboxFile := filepath.Join(outboxDir, "ack_"+uuid.New().String())
if err := os.WriteFile(outboxFile, data, 0600); err != nil {
return fmt.Errorf("sendProcessingAck: WriteFile: %w", err)
}
var servers []client.Server
for _, srvUid := range peer.ContactPullServers {
srv, loadErr := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvUid)
if loadErr == nil && srv != nil {
servers = append(servers, *srv)
}
}
if len(servers) == 0 {
os.Remove(outboxFile)
return errors.New("sendProcessingAck: no contact servers found")
}
return client.PushSendJob(storagePath, &client.SendJob{
Queue: peer.Uid,
File: outboxFile,
Servers: servers,
})
}
// ProcessSentMessages scans every send queue under storagePath/queues/, updates
// the message storage entry with server delivery info for each sent job, then
// removes the job from the queue. Returns the number of messages updated.
//
// The message DB location is recovered from the job's File basename, which must
// follow the naming convention produced by CreateUserMessageAndSendJob:
//
// outbox/{dbFile}_{dbId}
func ProcessSentMessages(storagePath string) int {
password, _ := client.GetConfig().GetMemPass()
queueDir := filepath.Join(storagePath, "queues")
entries, err := os.ReadDir(queueDir)
if err != nil {
logger.Warn().Err(err).Str("dir", queueDir).Msg("ProcessSentMessages: ReadDir")
return 0
}
updated := 0
for _, entry := range entries {
if entry.IsDir() {
continue
}
queue := entry.Name()
jobs, err := client.GetSentJobs(storagePath, queue)
if err != nil {
logger.Error().Err(err).Str("queue", queue).Msg("ProcessSentMessages: GetSentJobs")
continue
}
for _, job := range jobs {
if job.SuccessfulServer == nil || job.SentAt == nil {
// No delivery info discard the job so it doesn't block the queue
if err := client.DeleteSendJob(storagePath, queue, job.ID); err != nil {
logger.Error().Err(err).Int64("id", job.ID).Msg("ProcessSentMessages: DeleteSendJob (incomplete)")
}
continue
}
// Recover dbFile and dbId from the outbox filename: {dbFile}_{dbId}
base := filepath.Base(job.File)
sep := strings.LastIndex(base, "_")
if sep <= 0 {
logger.Error().Int64("id", job.ID).Str("file", job.File).
Msg("ProcessSentMessages: cannot parse dbFile/dbId from job filename — use CreateUserMessageAndSendJob to build jobs")
continue
}
dbFile := base[:sep]
dbId, parseErr := strconv.ParseInt(base[sep+1:], 10, 64)
if parseErr != nil || dbFile == "" || dbId == 0 {
logger.Error().Int64("id", job.ID).Str("file", job.File).
Msg("ProcessSentMessages: invalid dbFile/dbId in job filename")
continue
}
serverUid := job.Servers[*job.SuccessfulServer].GetUid()
receiveTime := uint64(job.SentAt.Unix())
if err := client.SetMessageServerDelivery(dbFile, dbId, serverUid, receiveTime, password); err != nil {
logger.Error().Err(err).Str("queue", queue).
Str("dbFile", dbFile).Int64("dbId", dbId).
Msg("ProcessSentMessages: SetMessageServerDelivery")
continue
}
if err := client.DeleteSendJob(storagePath, queue, job.ID); err != nil {
logger.Error().Err(err).Int64("id", job.ID).Msg("ProcessSentMessages: DeleteSendJob")
}
updated++
}
}
return updated
}

View File

@@ -0,0 +1,302 @@
package helpers
import (
"fmt"
"os"
"path/filepath"
"testing"
"time"
"forge.redroom.link/yves/meowlib"
"forge.redroom.link/yves/meowlib/client"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
_ "github.com/mattn/go-sqlite3"
)
// setupMsgHelperConfig wires the global client.Config singleton to a fresh
// temporary directory and returns it. Original values are restored in t.Cleanup.
func setupMsgHelperConfig(t *testing.T) (dir string, id *client.Identity) {
t.Helper()
dir = t.TempDir()
cfg := client.GetConfig()
origStorage := cfg.StoragePath
origSuffix := cfg.DbSuffix
origChunk := cfg.Chunksize
cfg.StoragePath = dir
cfg.DbSuffix = ".sqlite"
cfg.Chunksize = 1024 * 1024
require.NoError(t, cfg.SetMemPass("testpassword"))
var err error
id, err = client.CreateIdentity("testuser")
require.NoError(t, err)
t.Cleanup(func() {
cfg.StoragePath = origStorage
cfg.DbSuffix = origSuffix
cfg.Chunksize = origChunk
})
return dir, id
}
// newFullyKeyedPeer returns a Peer with all three keypairs and contact keys set,
// ready to store messages.
func newFullyKeyedPeer(t *testing.T, uid string) *client.Peer {
t.Helper()
var err error
peer := &client.Peer{
Uid: uid,
Name: "TestPeer-" + uid,
}
peer.MyIdentity, err = meowlib.NewKeyPair()
require.NoError(t, err)
peer.MyEncryptionKp, err = meowlib.NewKeyPair()
require.NoError(t, err)
peer.MyLookupKp, err = meowlib.NewKeyPair()
require.NoError(t, err)
k, err := meowlib.NewKeyPair()
require.NoError(t, err)
peer.ContactPublicKey = k.Public
k, err = meowlib.NewKeyPair()
require.NoError(t, err)
peer.ContactEncryption = k.Public
k, err = meowlib.NewKeyPair()
require.NoError(t, err)
peer.ContactLookupKey = k.Public
return peer
}
// storeTestMessage stores a single outbound message for peer.
func storeTestMessage(t *testing.T, peer *client.Peer, text string) {
t.Helper()
um := &meowlib.UserMessage{
Data: []byte(text),
From: peer.MyIdentity.Public,
Status: &meowlib.ConversationStatus{Uuid: "uuid-" + text},
}
require.NoError(t, peer.StoreMessage(um, nil))
require.NotNil(t, peer.LastMessage, "StoreMessage must set LastMessage")
}
// pushAndMarkSent pushes a send job for the given peer and marks it as delivered
// by the given server. Returns the job after the status update.
// The outbox file is named {dbFile}_{dbId} so that ProcessSentMessages can
// recover the message DB location from the filename, matching the convention
// used by CreateUserMessageAndSendJob.
func pushAndMarkSent(t *testing.T, dir string, peer *client.Peer, srv client.Server) *client.SendJob {
t.Helper()
dbFile := peer.LastMessage.Dbfile
dbId := peer.LastMessage.Dbid
outboxDir := filepath.Join(dir, "outbox")
require.NoError(t, os.MkdirAll(outboxDir, 0700))
msgFile := filepath.Join(outboxDir, fmt.Sprintf("%s_%d", dbFile, dbId))
require.NoError(t, os.WriteFile(msgFile, []byte("packed-server-message"), 0600))
require.NoError(t, client.PushSendJob(dir, &client.SendJob{
Queue: peer.Uid,
File: msgFile,
Servers: []client.Server{srv},
Timeout: 60,
}))
job, _, err := client.PeekSendJob(dir, peer.Uid)
require.NoError(t, err)
require.NotNil(t, job)
sentAt := time.Now()
srvIdx := 0
job.Status = client.SendStatusSent
job.SentAt = &sentAt
job.SuccessfulServer = &srvIdx
require.NoError(t, client.UpdateSendJob(dir, peer.Uid, job))
return job
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// TestProcessSentMessages_UpdatesDeliveryInfo is the main round-trip test.
// It verifies that after ProcessSentMessages runs:
// - the function returns 1 (one message updated)
// - the send job is removed from the queue
// - a subsequent LoadMessagesHistory returns ServerDeliveryUuid and
// ServerDeliveryTimestamp for the message
func TestProcessSentMessages_UpdatesDeliveryInfo(t *testing.T) {
dir, id := setupMsgHelperConfig(t)
peer := newFullyKeyedPeer(t, "peer-uid-main")
require.NoError(t, id.Peers.StorePeer(peer))
storeTestMessage(t, peer, "hello world")
srv := client.Server{Url: "http://test-server.example"}
job := pushAndMarkSent(t, dir, peer, srv)
// --- call under test ---
updated := ProcessSentMessages(dir)
assert.Equal(t, 1, updated, "exactly one message should be updated")
// The job must be removed from the queue after processing.
jobAfter, err := client.GetSendJob(dir, peer.Uid, job.ID)
require.NoError(t, err)
assert.Nil(t, jobAfter, "job should be deleted after processing")
// Reload message history and verify delivery metadata was persisted.
msgs, err := peer.LoadMessagesHistory(0, 0, 50)
require.NoError(t, err)
require.Len(t, msgs, 1, "expected exactly one message in history")
assert.Equal(t, srv.GetUid(), msgs[0].ServerDeliveryUuid,
"ServerDeliveryUuid should match the server that accepted the message")
assert.NotZero(t, msgs[0].ServerDeliveryTimestamp,
"ServerDeliveryTimestamp should be set after ProcessSentMessages")
assert.Equal(t, uint64(job.SentAt.Unix()), msgs[0].ServerDeliveryTimestamp,
"ServerDeliveryTimestamp should match job.SentAt")
}
// TestProcessSentMessages_SkipsJobWithoutDeliveryInfo verifies that a Sent job
// missing SentAt or SuccessfulServer is discarded (not counted, not updating
// the message DB).
func TestProcessSentMessages_SkipsJobWithoutDeliveryInfo(t *testing.T) {
dir, id := setupMsgHelperConfig(t)
peer := newFullyKeyedPeer(t, "peer-uid-incomplete")
require.NoError(t, id.Peers.StorePeer(peer))
storeTestMessage(t, peer, "incomplete job")
dbFile := peer.LastMessage.Dbfile
dbId := peer.LastMessage.Dbid
outboxDir := filepath.Join(dir, "outbox")
require.NoError(t, os.MkdirAll(outboxDir, 0700))
msgFile := filepath.Join(outboxDir, fmt.Sprintf("%s_%d", dbFile, dbId))
require.NoError(t, os.WriteFile(msgFile, []byte("packed"), 0600))
require.NoError(t, client.PushSendJob(dir, &client.SendJob{
Queue: peer.Uid,
File: msgFile,
Servers: []client.Server{{Url: "http://test-server.example"}},
Timeout: 60,
}))
job, _, err := client.PeekSendJob(dir, peer.Uid)
require.NoError(t, err)
require.NotNil(t, job)
// Mark as Sent but intentionally leave SentAt and SuccessfulServer nil.
job.Status = client.SendStatusSent
require.NoError(t, client.UpdateSendJob(dir, peer.Uid, job))
updated := ProcessSentMessages(dir)
assert.Equal(t, 0, updated, "incomplete job must not be counted as updated")
// Message should have no delivery info.
msgs, err := peer.LoadMessagesHistory(0, 0, 50)
require.NoError(t, err)
require.Len(t, msgs, 1)
assert.Empty(t, msgs[0].ServerDeliveryUuid, "delivery UUID must not be set")
assert.Zero(t, msgs[0].ServerDeliveryTimestamp, "delivery timestamp must not be set")
}
// TestProcessSentMessages_EmptyQueues verifies that an absent or empty queues
// directory results in 0 updates without error.
func TestProcessSentMessages_EmptyQueues(t *testing.T) {
dir, _ := setupMsgHelperConfig(t)
// queues/ directory does not exist yet.
updated := ProcessSentMessages(dir)
assert.Equal(t, 0, updated, "no queues → 0 updates")
// Also test with the directory present but empty.
require.NoError(t, os.MkdirAll(filepath.Join(dir, "queues"), 0700))
updated = ProcessSentMessages(dir)
assert.Equal(t, 0, updated, "empty queues → 0 updates")
}
// TestProcessSentMessages_UnparseableFilename verifies that a job whose filename
// does not follow the {dbFile}_{dbId} convention is skipped with a logged error
// and not counted as updated.
func TestProcessSentMessages_UnparseableFilename(t *testing.T) {
	storageDir, id := setupMsgHelperConfig(t)

	p := newFullyKeyedPeer(t, "peer-uid-nodbinfo")
	require.NoError(t, id.Peers.StorePeer(p))
	storeTestMessage(t, p, "the real message")

	// "badname.bin" contains no underscore, so the {dbFile}_{dbId}
	// convention cannot be recovered from the basename.
	badFile := filepath.Join(storageDir, "badname.bin")
	require.NoError(t, os.WriteFile(badFile, []byte("packed"), 0600))
	require.NoError(t, client.PushSendJob(storageDir, &client.SendJob{
		Queue:   p.Uid,
		File:    badFile,
		Servers: []client.Server{{Url: "http://test-server.example"}},
		Timeout: 60,
	}))

	// Flag the queued job as successfully sent.
	job, _, err := client.PeekSendJob(storageDir, p.Uid)
	require.NoError(t, err)
	require.NotNil(t, job)
	now := time.Now()
	serverIdx := 0
	job.Status = client.SendStatusSent
	job.SentAt = &now
	job.SuccessfulServer = &serverIdx
	require.NoError(t, client.UpdateSendJob(storageDir, p.Uid, job))

	// Must NOT count as updated; the real message row must be untouched.
	assert.Equal(t, 0, ProcessSentMessages(storageDir),
		"job without db info must not be counted as updated")
	msgs, err := p.LoadMessagesHistory(0, 0, 50)
	require.NoError(t, err)
	require.Len(t, msgs, 1)
	assert.Empty(t, msgs[0].ServerDeliveryUuid, "delivery UUID must not be set")
	assert.Zero(t, msgs[0].ServerDeliveryTimestamp, "delivery timestamp must not be set")
}
// TestProcessSentMessages_MultipleMessages verifies that all jobs in the same
// queue are processed and that each message gets its own delivery info.
func TestProcessSentMessages_MultipleMessages(t *testing.T) {
	storageDir, id := setupMsgHelperConfig(t)

	p := newFullyKeyedPeer(t, "peer-uid-multi")
	require.NoError(t, id.Peers.StorePeer(p))

	server := client.Server{Url: "http://test-server.example"}
	const total = 3
	// Store and mark-sent several messages in the same queue.
	for i := 0; i < total; i++ {
		storeTestMessage(t, p, fmt.Sprintf("message-%d", i))
		pushAndMarkSent(t, storageDir, p, server)
	}

	assert.Equal(t, total, ProcessSentMessages(storageDir),
		"all %d messages should be updated", total)

	msgs, err := p.LoadMessagesHistory(0, 0, 50)
	require.NoError(t, err)
	require.Len(t, msgs, total)
	for _, m := range msgs {
		assert.Equal(t, server.GetUid(), m.ServerDeliveryUuid,
			"every message should have ServerDeliveryUuid set")
		assert.NotZero(t, m.ServerDeliveryTimestamp,
			"every message should have ServerDeliveryTimestamp set")
	}
}

View File

@@ -1,5 +1,6 @@
package helpers package helpers
/*
import ( import (
"forge.redroom.link/yves/meowlib" "forge.redroom.link/yves/meowlib"
"forge.redroom.link/yves/meowlib/client" "forge.redroom.link/yves/meowlib/client"
@@ -28,3 +29,4 @@ func HttpSendMessage(serverUid string, message []byte, timeout int) ([]byte, err
} }
return response, nil return response, nil
} }
*/

View File

@@ -1,9 +1,11 @@
package client package client
import ( import (
"crypto/rand"
"encoding/base64"
"encoding/json" "encoding/json"
"errors" "errors"
"math/rand" mrand "math/rand"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -13,6 +15,7 @@ import (
"forge.redroom.link/yves/meowlib" "forge.redroom.link/yves/meowlib"
"github.com/ProtonMail/gopenpgp/v2/helper" "github.com/ProtonMail/gopenpgp/v2/helper"
"github.com/google/uuid" "github.com/google/uuid"
doubleratchet "github.com/status-im/doubleratchet"
) )
const maxHiddenCount = 30 const maxHiddenCount = 30
@@ -20,7 +23,7 @@ const maxHiddenCount = 30
// Package-level random number generator with mutex for thread-safe access // Package-level random number generator with mutex for thread-safe access
var ( var (
rngMu sync.Mutex rngMu sync.Mutex
rng = rand.New(rand.NewSource(time.Now().UnixNano())) rng = mrand.New(mrand.NewSource(time.Now().UnixNano()))
) )
type Identity struct { type Identity struct {
@@ -96,6 +99,11 @@ func (id *Identity) InvitePeer(MyName string, ContactName string, MessageServerU
} }
peer.Name = ContactName peer.Name = ContactName
peer.InvitationId = uuid.New().String() // todo as param to identify then update url peer.InvitationId = uuid.New().String() // todo as param to identify then update url
symKeyBytes := make([]byte, 32)
if _, err = rand.Read(symKeyBytes); err != nil {
return nil, err
}
peer.MySymKey = base64.StdEncoding.EncodeToString(symKeyBytes)
/* if id.MessageServers.Servers == nil { /* if id.MessageServers.Servers == nil {
return nil, errors.New("no message servers defined in your identity") return nil, errors.New("no message servers defined in your identity")
} }
@@ -115,6 +123,21 @@ func (id *Identity) InvitePeer(MyName string, ContactName string, MessageServerU
peer.MyPullServers = MessageServerUids peer.MyPullServers = MessageServerUids
peer.MyName = MyName peer.MyName = MyName
peer.InvitationMessage = InvitationMessage peer.InvitationMessage = InvitationMessage
// Generate DR keypair and root key for the initiator side
drKp, err := doubleratchet.DefaultCrypto{}.GenerateDH()
if err != nil {
return nil, err
}
peer.DrKpPrivate = base64.StdEncoding.EncodeToString(drKp.PrivateKey())
peer.DrKpPublic = base64.StdEncoding.EncodeToString(drKp.PublicKey())
drRootKey := make([]byte, 32)
if _, err = rand.Read(drRootKey); err != nil {
return nil, err
}
peer.DrRootKey = base64.StdEncoding.EncodeToString(drRootKey)
peer.DrInitiator = true
id.Peers.StorePeer(&peer) id.Peers.StorePeer(&peer)
return &peer, nil return &peer, nil
@@ -161,6 +184,7 @@ func (id *Identity) AnswerInvitation(MyName string, ContactName string, MessageS
peer.ContactEncryption = ReceivedContact.EncryptionPublicKey peer.ContactEncryption = ReceivedContact.EncryptionPublicKey
peer.ContactLookupKey = ReceivedContact.LookupPublicKey peer.ContactLookupKey = ReceivedContact.LookupPublicKey
peer.ContactPublicKey = ReceivedContact.ContactPublicKey peer.ContactPublicKey = ReceivedContact.ContactPublicKey
peer.MySymKey = ReceivedContact.SymetricKey
peer.InvitationId = ReceivedContact.InvitationId peer.InvitationId = ReceivedContact.InvitationId
peer.InvitationMessage = ReceivedContact.InvitationMessage peer.InvitationMessage = ReceivedContact.InvitationMessage
for srv := range ReceivedContact.PullServers { for srv := range ReceivedContact.PullServers {
@@ -179,6 +203,10 @@ func (id *Identity) AnswerInvitation(MyName string, ContactName string, MessageS
peer.MyPullServers = MessageServerIdxs peer.MyPullServers = MessageServerIdxs
peer.MyName = MyName peer.MyName = MyName
peer.InvitationId = ReceivedContact.InvitationId peer.InvitationId = ReceivedContact.InvitationId
// Adopt DR material from the initiator's ContactCard
peer.DrRootKey = ReceivedContact.DrRootKey
peer.ContactDrPublicKey = ReceivedContact.DrPublicKey
peer.DrInitiator = false
id.Peers.StorePeer(&peer) id.Peers.StorePeer(&peer)
return &peer, nil return &peer, nil

View File

@@ -9,7 +9,7 @@ type InternalUserMessage struct {
Status *meowlib.ConversationStatus `json:"conversation_status,omitempty"` Status *meowlib.ConversationStatus `json:"conversation_status,omitempty"`
Contact *meowlib.ContactCard `json:"contact,omitempty"` Contact *meowlib.ContactCard `json:"contact,omitempty"`
ServerDeliveryUuid string `json:"server_delivery_uuid,omitempty"` ServerDeliveryUuid string `json:"server_delivery_uuid,omitempty"`
ServerDeliveryTimestamp int64 `json:"server_delivery_timestamp,omitempty"` ServerDeliveryTimestamp uint64 `json:"server_delivery_timestamp,omitempty"`
//Group group //Group group
FilePaths []string `json:"file_paths,omitempty"` FilePaths []string `json:"file_paths,omitempty"`
CurrentLocation *meowlib.Location `json:"current_location,omitempty"` CurrentLocation *meowlib.Location `json:"current_location,omitempty"`
@@ -44,6 +44,10 @@ func ProcessOutboundTextMessage(peer *Peer, text string, srv *Server) ([]byte, e
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Persist peer to save updated DR state (DrStateJson)
if peer.DrRootKey != "" {
GetConfig().GetIdentity().Peers.StorePeer(peer)
}
// Creating Server message for transporting the user message // Creating Server message for transporting the user message
toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg) toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg)
return srv.ProcessOutboundMessage(toServerMessage) return srv.ProcessOutboundMessage(toServerMessage)

View File

@@ -2,6 +2,7 @@ package client
import ( import (
"database/sql" "database/sql"
"fmt"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@@ -256,8 +257,9 @@ func GetDbMessage(dbFile string, dbId int64, password string) (*meowlib.DbMessag
} }
defer rows.Close() defer rows.Close()
var dbm meowlib.DbMessage var dbm meowlib.DbMessage
found := false
for rows.Next() { for rows.Next() {
found = true
var id int64 var id int64
var m []byte var m []byte
err = rows.Scan(&id, &m) err = rows.Scan(&id, &m)
@@ -272,7 +274,9 @@ func GetDbMessage(dbFile string, dbId int64, password string) (*meowlib.DbMessag
if err != nil { if err != nil {
return nil, err return nil, err
} }
}
if !found {
return nil, fmt.Errorf("message row %d not found in %s", dbId, dbFile)
} }
return &dbm, nil return &dbm, nil
} }
@@ -353,6 +357,78 @@ func getMessageCount(dbid string) (int, error) {
return count, nil return count, nil
} }
// SetMessageServerDelivery updates the server delivery UUID and timestamp for an existing stored message.
func SetMessageServerDelivery(dbFile string, dbId int64, serverUid string, receiveTime uint64, password string) error {
dbm, err := GetDbMessage(dbFile, dbId, password)
if err != nil {
return err
}
dbm.ServerDeliveryUuid = serverUid
dbm.ServerDeliveryTimestamp = receiveTime
return UpdateDbMessage(dbm, dbFile, dbId, password)
}
// FindMessageByUuid scans all DB files for a peer (newest first) and returns
// the dbFile, row ID, and DbMessage for the message whose Status.Uuid matches.
func FindMessageByUuid(peer *Peer, messageUuid string, password string) (string, int64, *meowlib.DbMessage, error) {
cfg := GetConfig()
identity := cfg.GetIdentity()
for i := len(peer.DbIds) - 1; i >= 0; i-- {
dbid := peer.DbIds[i]
db, err := sql.Open("sqlite3", filepath.Join(cfg.StoragePath, identity.Uuid, dbid+GetConfig().DbSuffix))
if err != nil {
continue
}
rows, err := db.Query("SELECT id, m FROM message ORDER BY id DESC")
if err != nil {
db.Close()
continue
}
for rows.Next() {
var id int64
var m []byte
if err := rows.Scan(&id, &m); err != nil {
continue
}
decdata, err := meowlib.SymDecrypt(password, m)
if err != nil {
continue
}
var dbm meowlib.DbMessage
if err := proto.Unmarshal(decdata, &dbm); err != nil {
continue
}
if dbm.Status != nil && dbm.Status.Uuid == messageUuid {
rows.Close()
db.Close()
return dbid, id, &dbm, nil
}
}
rows.Close()
db.Close()
}
return "", 0, nil, fmt.Errorf("message with UUID %s not found", messageUuid)
}
// UpdateMessageAck finds a stored outbound message by UUID and stamps it with
// the received and/or processed timestamps from an inbound ACK message.
func UpdateMessageAck(peer *Peer, messageUuid string, receivedAt uint64, processedAt uint64, password string) error {
dbFile, dbId, dbm, err := FindMessageByUuid(peer, messageUuid, password)
if err != nil {
return err
}
if dbm.Status == nil {
dbm.Status = &meowlib.ConversationStatus{}
}
if receivedAt != 0 {
dbm.Status.Received = receivedAt
}
if processedAt != 0 {
dbm.Status.Processed = processedAt
}
return UpdateDbMessage(dbm, dbFile, dbId, password)
}
func createMessageTable(db *sql.DB) error { func createMessageTable(db *sql.DB) error {
createMessageTableSQL := `CREATE TABLE message ( createMessageTableSQL := `CREATE TABLE message (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,

View File

@@ -1,11 +1,13 @@
package client package client
import ( import (
"encoding/json"
"io" "io"
"os" "os"
"time" "time"
"forge.redroom.link/yves/meowlib" "forge.redroom.link/yves/meowlib"
doubleratchet "github.com/status-im/doubleratchet"
"github.com/google/uuid" "github.com/google/uuid"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
@@ -26,6 +28,7 @@ type Peer struct {
MyIdentity *meowlib.KeyPair `json:"my_identity,omitempty"` MyIdentity *meowlib.KeyPair `json:"my_identity,omitempty"`
MyEncryptionKp *meowlib.KeyPair `json:"my_encryption_kp,omitempty"` MyEncryptionKp *meowlib.KeyPair `json:"my_encryption_kp,omitempty"`
MyLookupKp *meowlib.KeyPair `json:"my_lookup_kp,omitempty"` MyLookupKp *meowlib.KeyPair `json:"my_lookup_kp,omitempty"`
MySymKey string `json:"my_sym_key,omitempty"`
MyPullServers []string `json:"my_pull_servers,omitempty"` MyPullServers []string `json:"my_pull_servers,omitempty"`
// Peer keys and infos // Peer keys and infos
//Contact meowlib.ContactCard `json:"contact,omitempty"` // todo : remove //Contact meowlib.ContactCard `json:"contact,omitempty"` // todo : remove
@@ -40,6 +43,8 @@ type Peer struct {
LastMessage *InternalUserMessage `json:"last_message,omitempty"` LastMessage *InternalUserMessage `json:"last_message,omitempty"`
// Internal management attributes // Internal management attributes
Visible bool `json:"visible,omitempty"` Visible bool `json:"visible,omitempty"`
SendDeliveryAck bool `json:"send_delivery_ack,omitempty"`
SendProcessingAck bool `json:"send_processing_ack,omitempty"`
VisiblePassword string `json:"visible_password,omitempty"` VisiblePassword string `json:"visible_password,omitempty"`
PasswordType string `json:"password_type,omitempty"` PasswordType string `json:"password_type,omitempty"`
Blocked bool `json:"blocked,omitempty"` Blocked bool `json:"blocked,omitempty"`
@@ -51,6 +56,13 @@ type Peer struct {
DbIds []string `json:"db_ids,omitempty"` DbIds []string `json:"db_ids,omitempty"`
Type string `json:"type,omitempty"` Type string `json:"type,omitempty"`
PersonnaeDbId string `json:"personnae_db_id,omitempty"` PersonnaeDbId string `json:"personnae_db_id,omitempty"`
// Double Ratchet state
DrKpPublic string `json:"dr_kp_public,omitempty"`
DrKpPrivate string `json:"dr_kp_private,omitempty"`
DrRootKey string `json:"dr_root_key,omitempty"`
DrInitiator bool `json:"dr_initiator,omitempty"`
ContactDrPublicKey string `json:"contact_dr_public_key,omitempty"`
DrStateJson string `json:"dr_state_json,omitempty"`
dbPassword string dbPassword string
} }
@@ -70,6 +82,9 @@ func (p *Peer) GetMyContact() *meowlib.ContactCard {
c.InvitationId = p.InvitationId c.InvitationId = p.InvitationId
c.InvitationMessage = p.InvitationMessage c.InvitationMessage = p.InvitationMessage
c.Name = p.MyName c.Name = p.MyName
c.SymetricKey = p.MySymKey
c.DrRootKey = p.DrRootKey
c.DrPublicKey = p.DrKpPublic
return &c return &c
} }
@@ -196,6 +211,24 @@ func (p *Peer) DeserializeUserMessage(data []byte) (*meowlib.UserMessage, error)
return &msg, nil return &msg, nil
} }
// SymEncryptPayload applies the shared symmetric key over already-encrypted data.
// If MySymKey is empty, data is returned unchanged (peer has no symkey configured).
func (p *Peer) SymEncryptPayload(data []byte) ([]byte, error) {
if p.MySymKey == "" {
return data, nil
}
return meowlib.SymEncrypt(p.MySymKey, data)
}
// SymDecryptPayload removes the outer symmetric encryption layer.
// If MySymKey is empty, data is returned unchanged.
func (p *Peer) SymDecryptPayload(data []byte) ([]byte, error) {
if p.MySymKey == "" {
return data, nil
}
return meowlib.SymDecrypt(p.MySymKey, data)
}
// AsymEncryptMessage prepares a message to send to a specific peer contact // AsymEncryptMessage prepares a message to send to a specific peer contact
func (p *Peer) AsymEncryptMessage(Message []byte) (*meowlib.EncryptedMessage, error) { func (p *Peer) AsymEncryptMessage(Message []byte) (*meowlib.EncryptedMessage, error) {
var enc *meowlib.EncryptedMessage var enc *meowlib.EncryptedMessage
@@ -256,19 +289,66 @@ func (p *Peer) ProcessOutboundUserMessage(usermessage *meowlib.UserMessage) (*me
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Encrypting it // Asymmetric encryption + signature (inner layer)
enc, err := p.AsymEncryptMessage(serializedMessage) enc, err := p.AsymEncryptMessage(serializedMessage)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Packing it // Symmetric encryption (middle layer, if symkey is configured)
packedMsg := p.PackUserMessage(enc.Data, enc.Signature) symEncrypted, err := p.SymEncryptPayload(enc.Data)
if err != nil {
return nil, err
}
// Double Ratchet encryption (outermost layer, if DR is configured)
if p.DrRootKey != "" {
session, err := p.GetDRSession()
if err != nil {
return nil, err
}
drMsg, err := session.RatchetEncrypt(symEncrypted, nil)
if err != nil {
return nil, err
}
headerBytes, err := json.Marshal(drMsg.Header)
if err != nil {
return nil, err
}
packed := p.PackUserMessage(drMsg.Ciphertext, enc.Signature)
packed.DrHeader = headerBytes
return packed, nil
}
// No DR layer
packedMsg := p.PackUserMessage(symEncrypted, enc.Signature)
return packedMsg, nil return packedMsg, nil
} }
// ProcessInboundUserMessage is a helper function that decrypts and deserializes a user message // ProcessInboundUserMessage is a helper function that decrypts and deserializes a user message
func (p *Peer) ProcessInboundUserMessage(message []byte, signature []byte) (*meowlib.UserMessage, error) { func (p *Peer) ProcessInboundUserMessage(packed *meowlib.PackedUserMessage) (*meowlib.UserMessage, error) {
dec, err := p.AsymDecryptMessage(message, signature) payload := packed.Payload
// Double Ratchet decryption (outermost layer), only when DR is configured and header present
if p.DrRootKey != "" && len(packed.DrHeader) > 0 {
session, err := p.GetDRSession()
if err != nil {
return nil, err
}
var header doubleratchet.MessageHeader
if err := json.Unmarshal(packed.DrHeader, &header); err != nil {
return nil, err
}
payload, err = session.RatchetDecrypt(
doubleratchet.Message{Header: header, Ciphertext: packed.Payload},
nil,
)
if err != nil {
return nil, err
}
}
// Symmetric decryption (middle layer, if symkey is configured)
symDecrypted, err := p.SymDecryptPayload(payload)
if err != nil {
return nil, err
}
dec, err := p.AsymDecryptMessage(symDecrypted, packed.Signature)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -1,11 +1,14 @@
package client package client
import ( import (
"crypto/rand"
"encoding/base64"
"os" "os"
"strconv" "strconv"
"testing" "testing"
"forge.redroom.link/yves/meowlib" "forge.redroom.link/yves/meowlib"
doubleratchet "github.com/status-im/doubleratchet"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
@@ -421,7 +424,7 @@ func TestProcessOutboundInbound_RoundTrip(t *testing.T) {
assert.NotEmpty(t, packed.Signature) assert.NotEmpty(t, packed.Signature)
assert.Equal(t, bob.MyLookupKp.Public, packed.Destination) assert.Equal(t, bob.MyLookupKp.Public, packed.Destination)
received, err := bob.ProcessInboundUserMessage(packed.Payload, packed.Signature) received, err := bob.ProcessInboundUserMessage(packed)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []byte("end to end test"), received.Data) assert.Equal(t, []byte("end to end test"), received.Data)
assert.Equal(t, alice.MyIdentity.Public, received.From) assert.Equal(t, alice.MyIdentity.Public, received.From)
@@ -436,7 +439,7 @@ func TestProcessOutboundInbound_EmptyMessage(t *testing.T) {
packed, err := alice.ProcessOutboundUserMessage(userMsg) packed, err := alice.ProcessOutboundUserMessage(userMsg)
assert.NoError(t, err) assert.NoError(t, err)
received, err := bob.ProcessInboundUserMessage(packed.Payload, packed.Signature) received, err := bob.ProcessInboundUserMessage(packed)
assert.NoError(t, err) assert.NoError(t, err)
assert.Empty(t, received.Data) assert.Empty(t, received.Data)
} }
@@ -452,6 +455,74 @@ func TestProcessOutboundUserMessage_InvalidKey(t *testing.T) {
assert.Error(t, err) assert.Error(t, err)
} }
// ---------------------------------------------------------------------------
// DR-encrypted round-trip
// ---------------------------------------------------------------------------
func makeDRPeerPair(t *testing.T) (alice *Peer, bob *Peer) {
t.Helper()
alice, bob = makePeerPair(t)
// Generate DR keypair for alice (initiator)
drKp, err := doubleratchet.DefaultCrypto{}.GenerateDH()
if err != nil {
t.Fatal(err)
}
drRootKeyBytes := make([]byte, 32)
if _, err = rand.Read(drRootKeyBytes); err != nil {
t.Fatal(err)
}
drRootKey := base64.StdEncoding.EncodeToString(drRootKeyBytes)
alice.DrKpPrivate = base64.StdEncoding.EncodeToString(drKp.PrivateKey())
alice.DrKpPublic = base64.StdEncoding.EncodeToString(drKp.PublicKey())
alice.DrRootKey = drRootKey
alice.DrInitiator = true
bob.DrRootKey = drRootKey
bob.ContactDrPublicKey = alice.DrKpPublic
bob.DrInitiator = false
return alice, bob
}
func TestProcessOutboundInbound_DR_RoundTrip(t *testing.T) {
alice, bob := makeDRPeerPair(t)
userMsg, err := alice.BuildSimpleUserMessage([]byte("dr round trip test"))
assert.NoError(t, err)
packed, err := alice.ProcessOutboundUserMessage(userMsg)
assert.NoError(t, err)
assert.NotEmpty(t, packed.DrHeader, "DR header should be set")
received, err := bob.ProcessInboundUserMessage(packed)
assert.NoError(t, err)
assert.Equal(t, []byte("dr round trip test"), received.Data)
// Verify DR state was updated
assert.NotEmpty(t, alice.DrStateJson, "alice DR state should be persisted")
assert.NotEmpty(t, bob.DrStateJson, "bob DR state should be persisted")
}
func TestProcessOutboundInbound_DR_MultipleMessages(t *testing.T) {
alice, bob := makeDRPeerPair(t)
for i := 0; i < 5; i++ {
msg := []byte("message " + strconv.Itoa(i))
userMsg, err := alice.BuildSimpleUserMessage(msg)
assert.NoError(t, err)
packed, err := alice.ProcessOutboundUserMessage(userMsg)
assert.NoError(t, err)
assert.NotEmpty(t, packed.DrHeader)
received, err := bob.ProcessInboundUserMessage(packed)
assert.NoError(t, err)
assert.Equal(t, msg, received.Data)
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// GetConversationRequest // GetConversationRequest
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------

View File

@@ -238,6 +238,9 @@ func (ps *PeerStorage) FinalizeInvitation(ReceivedContact *meowlib.ContactCard)
ps.cache[i].ContactEncryption = ReceivedContact.EncryptionPublicKey ps.cache[i].ContactEncryption = ReceivedContact.EncryptionPublicKey
ps.cache[i].ContactLookupKey = ReceivedContact.LookupPublicKey ps.cache[i].ContactLookupKey = ReceivedContact.LookupPublicKey
ps.cache[i].ContactPublicKey = ReceivedContact.ContactPublicKey ps.cache[i].ContactPublicKey = ReceivedContact.ContactPublicKey
if ps.cache[i].MySymKey == "" {
ps.cache[i].MySymKey = ReceivedContact.SymetricKey
}
srvs := []string{} srvs := []string{}
for srv := range ReceivedContact.PullServers { for srv := range ReceivedContact.PullServers {
srvs = append(srvs, ReceivedContact.PullServers[srv].GetUid()) srvs = append(srvs, ReceivedContact.PullServers[srv].GetUid())

View File

@@ -21,7 +21,9 @@ const (
// SendJob describes a message to send, together with its delivery tracking state. // SendJob describes a message to send, together with its delivery tracking state.
// //
// The File field holds the path of a pre-built packed server message (binary). // The File field holds the path to an outbox file written by CreateUserMessageAndSendJob.
// It must follow the naming convention outbox/{dbFile}_{dbId} so that
// ProcessSentMessages can recover the message DB location from the filename alone.
// Servers is tried in order; after MaxRetriesPerServer failures on one server // Servers is tried in order; after MaxRetriesPerServer failures on one server
// the next one is attempted. // the next one is attempted.
// //
@@ -29,8 +31,8 @@ const (
// are managed by the queue functions and must not be set by the caller. // are managed by the queue functions and must not be set by the caller.
type SendJob struct { type SendJob struct {
// --- caller-supplied fields --- // --- caller-supplied fields ---
Queue string `json:"queue,omitempty"` Queue string `json:"queue,omitempty"` // uid of destination peer, used for naming the queue sqlite db
File string `json:"file,omitempty"` File string `json:"file,omitempty"` // outbox file path; basename must be {dbFile}_{dbId}
Servers []Server `json:"servers,omitempty"` Servers []Server `json:"servers,omitempty"`
Timeout int `json:"timeout,omitempty"` // seconds; 0 = no timeout Timeout int `json:"timeout,omitempty"` // seconds; 0 = no timeout
@@ -101,7 +103,8 @@ func PushSendJob(storagePath string, job *SendJob) error {
return err return err
} }
_, err = db.Exec( _, err = db.Exec(
`INSERT INTO queue(file, servers, timeout, inserted_at, status, retries) VALUES(?,?,?,?,?,?)`, `INSERT INTO queue(file, servers, timeout, inserted_at, status, retries)
VALUES(?,?,?,?,?,?)`,
job.File, string(serversJSON), job.Timeout, time.Now().Unix(), SendStatusPending, string(retriesJSON), job.File, string(serversJSON), job.Timeout, time.Now().Unix(), SendStatusPending, string(retriesJSON),
) )
return err return err
@@ -258,6 +261,74 @@ func GetSendJob(storagePath, queue string, id int64) (*SendJob, error) {
return job, nil return job, nil
} }
// GetSentJobs returns all successfully-sent jobs from the named queue,
// ordered oldest first. Use this to reconcile delivery status with the
// message store and clean up completed entries.
func GetSentJobs(storagePath, queue string) ([]*SendJob, error) {
db, err := openOrCreateSendQueue(sendQueueDbPath(storagePath, queue))
if err != nil {
return nil, err
}
defer db.Close()
rows, err := db.Query(
`SELECT id, file, servers, timeout, inserted_at, sent_at, retries, successful_server
FROM queue WHERE status = ? ORDER BY id ASC`,
SendStatusSent,
)
if err != nil {
return nil, err
}
defer rows.Close()
var jobs []*SendJob
for rows.Next() {
var (
id int64
file string
serversJSON string
timeout int
insertedAt int64
sentAt sql.NullInt64
retriesJSON string
successfulServer sql.NullInt64
)
if err := rows.Scan(&id, &file, &serversJSON, &timeout, &insertedAt, &sentAt, &retriesJSON, &successfulServer); err != nil {
return nil, err
}
var servers []Server
if err := json.Unmarshal([]byte(serversJSON), &servers); err != nil {
return nil, err
}
var retries []int
if err := json.Unmarshal([]byte(retriesJSON), &retries); err != nil {
return nil, err
}
job := &SendJob{
ID: id,
Queue: queue,
File: file,
Servers: servers,
Timeout: timeout,
InsertedAt: time.Unix(insertedAt, 0),
Status: SendStatusSent,
Retries: retries,
}
if sentAt.Valid {
t := time.Unix(sentAt.Int64, 0)
job.SentAt = &t
}
if successfulServer.Valid {
v := int(successfulServer.Int64)
job.SuccessfulServer = &v
}
jobs = append(jobs, job)
}
return jobs, nil
}
// DeleteSendJob removes a row by id from the named queue. // DeleteSendJob removes a row by id from the named queue.
// If the queue is empty after deletion, the DB file is removed. // If the queue is empty after deletion, the DB file is removed.
func DeleteSendJob(storagePath, queue string, id int64) error { func DeleteSendJob(storagePath, queue string, id int64) error {

View File

@@ -0,0 +1,7 @@
@startuml
ClientFdThread -> Lib : write poll job list
ClientFdThread -> ClientBgThread : notify job ?
ClientBgThread -> Lib : poll for servers
ClientBgThread -> ClientFdThread : notify message here
ClientFdThread -> Lib : Read received message and update db
@enduml

View File

@@ -0,0 +1,7 @@
@startuml
ClientFdThread -> Lib : write msg to db, encrypted msg for user to file, and job file
ClientFdThread -> ClientBgThread : notify job
ClientBgThread -> Lib : encrypt for server(s) and send including retries
ClientBgThread -> Lib: notify send result
ClientFdThread -> Lib : Read job report and update db
@enduml

View File

@@ -0,0 +1,508 @@
# Multi-Device Conversation Sync — Implementation Plan
## Context
meowlib already has scaffolding for multi-device sync:
| Existing artefact | Where |
|---|---|
| `Identity.Device *KeyPair` | `client/identity.go:35` |
| `Identity.OwnedDevices PeerList` | `client/identity.go:40` |
| `Peer.Type string` | `client/peer.go:52` |
| `ToServerMessage.device_messages` (field 10) | `pb/messages.proto:75` |
| `FromServerMessage.device_messages` (field 9) | `pb/messages.proto:99` |
| `BackgroundJob.Device *KeyPair` | `client/identity.go:334` |
The server (`server/router.go`) does **not** yet implement `device_messages` routing; it goes through `messages`/`Chat` today.
---
## Chosen Sync Scheme: Event-Driven Delta Sync over Existing Message Infrastructure
### Rationale
| Approach | Pros | Cons | Verdict |
|---|---|---|---|
| Full DB sync | Complete history | Huge payloads, merge conflicts, wasteful | ❌ |
| Inbox/outbox file sharing | Simple to reason about | File-level granularity, no dedup, breaks privacy model | ❌ |
| **Event-driven delta sync** | Minimal data, no merge needed, reuses existing crypto + server stack | Requires dedup table | ✅ |
Each message event (received, sent, status change) is forwarded immediately to sibling devices through the **same server infrastructure** as regular peer messages. Each device maintains its own complete local DB. Convergence is eventual; dedup via `ConversationStatus.Uuid`.
### Key Design Decisions
1. **Zero server changes required.** Device sync messages are addressed to the sibling device's lookup key and travel through the existing `msg:{lookup_key}` Redis sorted-set on the server, returned in `from_server.Chat` — identical to peer messages.
2. **Device peers reuse the `Peer` struct** with `Type = "device"`, stored in `Identity.OwnedDevices`. They have their own three keypairs (`MyIdentity`, `MyEncryptionKp`, `MyLookupKp`) and `MyPullServers`.
3. **A new proto message `DeviceSyncPayload`** is added to `messages.proto`. It is serialised and placed in `UserMessage.Appdata`; the parent `UserMessage.Type` is set to `"device_sync"`. This lets the client recognise sync messages without any server-side awareness.
4. **`GetRequestJobs()`** is extended to include device lookup keys alongside peer lookup keys for the appropriate servers, so the background poll thread picks up device sync messages without any extra call.
5. **Dedup** is handled by a small SQLite table `device_sync_seen` (one table per identity folder, not per peer) keyed on `DeviceSyncPayload.DedupId`.
---
## New Protobuf Messages
Add to `pb/messages.proto` before re-generating:
```protobuf
// Payload carried inside UserMessage.appdata for device-to-device sync.
// The enclosing UserMessage.type MUST be "device_sync".
message DeviceSyncPayload {
string sync_type = 1; // "msg" | "status" | "peer_update" | "identity_update" | "server_add" | "forward"
string peer_uid = 2; // local UID of the peer conversation on the sending device
DbMessage db_message = 3; // the DbMessage to replicate (sync_type "msg" / "status")
string dedup_id = 4; // globally unique ID (= DbMessage.status.uuid or generated)
bytes peer_data = 5; // JSON-encoded Peer snapshot (sync_type "peer_update")
bytes identity_data = 6; // JSON-encoded identity profile snapshot (sync_type "identity_update")
bytes forward_payload = 7; // serialized UserMessage for primary to send on behalf of sibling (sync_type "forward")
string forward_peer_uid = 8; // primary-side peer UID to forward to (sync_type "forward")
}
```
Run `cd pb && ./protogen.sh` after adding this.
---
## Implementation Phases
### Phase 1 — Device Pairing
**Files to touch:** `client/identity.go`, `client/helpers/` (new file `deviceHelper.go`)
**Goal:** Allow two app instances owned by the same user to establish a shared keypair relationship, mirroring the peer invitation flow but flagging the peer as `Type = "device"`.
#### 1.1 `Identity.InitDevicePairing(myDeviceName string, serverUids []string) (*Peer, error)`
- Identical to `InvitePeer` but sets `peer.Type = "device"`.
- Stores the resulting peer in `Identity.OwnedDevices` (not `Peers`).
- Returns the peer so the caller can produce a `ContactCard` QR/file.
- **Sym + DR inherited automatically**: because the implementation mirrors `InvitePeer`, the device peer will have `MySymKey`, `DrKpPublic`, `DrKpPrivate`, `DrRootKey`, and `DrInitiator = true` populated automatically. The resulting `ContactCard` will carry `dr_root_key` and `dr_public_key` so the answering device can initialise its own DR session via `AnswerDevicePairing`.
#### 1.2 `Identity.AnswerDevicePairing(myDeviceName string, receivedContact *meowlib.ContactCard) (*Peer, error)`
- Mirrors `AnswerInvitation`, stores in `OwnedDevices`.
#### 1.3 `Identity.FinalizeDevicePairing(receivedContact *meowlib.ContactCard) error`
- Mirrors `FinalizeInvitation`, operates on `OwnedDevices`.
#### 1.4 Helper functions (new file `client/helpers/deviceHelper.go`)
```go
// DevicePairingCreateMessage wraps an invitation step-1 for a device peer.
func DevicePairingCreateMessage(peer *client.Peer, serverUid string) ([]byte, string, error)
// DevicePairingAnswerMessage wraps invitation step-3 answer for a device peer.
func DevicePairingAnswerMessage(peer *client.Peer, serverUid string) ([]byte, string, error)
```
These reuse `invitationCreateHelper.go`/`invitationAnswerHelper.go` logic.
#### 1.5 Extend `PeerStorage` operations for OwnedDevices
`OwnedDevices` is currently a `PeerList` (in-memory slice). This **must** be migrated to the same Badger-backed `PeerStorage` mechanism as `Peers` — it is no longer optional. Device peers carry a Double Ratchet session state (`DrStateJson`) that advances with every message sent or received. Without persistent storage the DR state is lost on restart, breaking the decryption of all subsequent messages. Add a `DeviceStorage PeerStorage` field to `Identity` with its own `DbFile`, and ensure `StorePeer` is called on the device peer after every outbound dispatch (in `DispatchSyncToDevices`) and after every inbound consume (in `ConsumeDeviceSyncMessage`), mirroring the pattern used in `messageHelper.go` and `bgPollHelper.go` for regular peers.
---
### Phase 2 — Sync Payload Helpers
**Files to touch:** `client/helpers/deviceHelper.go` (continued), `client/dbmessage.go`
#### 2.1 Build a sync message for one sibling device
```go
// BuildDeviceSyncMessage wraps a DbMessage into a UserMessage addressed to a
// sibling device peer. The caller then calls peer.ProcessOutboundUserMessage.
func BuildDeviceSyncMessage(
devicePeer *client.Peer,
    syncType string, // "msg" | "status" | "peer_update" | "identity_update"
peerUid string,
dbm *meowlib.DbMessage,
dedupId string,
) (*meowlib.UserMessage, error)
```
Implementation:
1. Serialise `DeviceSyncPayload{SyncType, PeerUid, DbMessage, DedupId}` with `proto.Marshal`.
2. Create a `UserMessage` with `Type = "device_sync"`, `Destination = devicePeer.ContactLookupKey`, `Appdata = serialisedPayload`.
3. Set `Status.Uuid = dedupId`.
#### 2.2 Dispatch sync to all sibling devices
```go
// DispatchSyncToDevices sends a DeviceSyncPayload to every device peer whose
// pull server list overlaps with the available servers.
// It enqueues a SendJob per device, reusing the existing bgSendHelper queue.
func DispatchSyncToDevices(
storagePath string,
syncType string,
peerUid string,
dbm *meowlib.DbMessage,
dedupId string,
) error
```
Iterates `identity.OwnedDevices`, builds and queues one `SendJob` per device (just like `CreateUserMessageAndSendJob` but using device peer keys and putting the message in `outbox/` with a recognisable prefix, e.g. `dev_{devPeerUid}_{dedupId}`).
After calling `peer.ProcessOutboundUserMessage` for each device peer, persist the updated DR state: `identity.DeviceStorage.StorePeer(devPeer)` if `devPeer.DrRootKey != ""`.
The message is packed into `ToServerMessage.Messages` (same field as regular chat). No server changes needed.
---
### Phase 3 — Integrate Dispatch into Send/Receive Paths
**Files to touch:** `client/helpers/messageHelper.go`, `client/helpers/bgPollHelper.go`
#### 3.1 After outbound message stored (`CreateAndStoreUserMessage`)
At the end of `CreateAndStoreUserMessage` (after `peer.StoreMessage`), add:
```go
// Async: do not block the caller
go DispatchSyncToDevices(storagePath, "msg", peerUid, dbm, usermessage.Status.Uuid)
```
The `dbm` is obtained from `UserMessageToDbMessage(true, usermessage, nil)` (files are excluded from sync — they stay on the originating device or are re-requested).
#### 3.2 After inbound message stored (`ConsumeInboxFile`)
After `peer.StoreMessage(usermsg, filenames)` succeeds:
```go
dbm := client.UserMessageToDbMessage(false, usermsg, nil)
go DispatchSyncToDevices(storagePath, "msg", peer.Uid, dbm, usermsg.Status.Uuid)
```
#### 3.3 After ACK status update (`ReadAckMessageResponse` — currently a stub)
When status timestamps (received/processed) are updated in the DB, dispatch a `"status"` sync with the updated `DbMessage`.
---
### Phase 4 — Receive & Consume Device Sync Messages
**Files to touch:** `client/helpers/bgPollHelper.go`, new `client/helpers/deviceSyncHelper.go`
#### 4.1 Extend `GetRequestJobs()` to include device lookup keys
In `identity.go:GetRequestJobs()`, after the loop over `Peers`, add a similar loop over `OwnedDevices`:
```go
for _, devPeer := range id.OwnedDevices {
for _, server := range devPeer.MyPullServers {
if job, ok := srvs[server]; ok {
job.LookupKeys = append(job.LookupKeys, devPeer.MyLookupKp)
}
}
}
```
Device messages will now arrive inside `from_server.Chat` alongside regular peer messages. The next step distinguishes them.
#### 4.2 Distinguish device vs peer messages in `ConsumeInboxFile`
After `identity.Peers.GetFromMyLookupKey(packedUserMessage.Destination)` returns `nil`, try:
```go
devPeer := identity.OwnedDevices.GetFromMyLookupKey(packedUserMessage.Destination)
if devPeer != nil {
err := ConsumeDeviceSyncMessage(devPeer, packedUserMessage)
// continue to next message
continue
}
// original error path
```
#### 4.3 `ConsumeDeviceSyncMessage` (new file `client/helpers/deviceSyncHelper.go`)
```go
func ConsumeDeviceSyncMessage(
devPeer *client.Peer,
packed *meowlib.PackedUserMessage,
) error
```
Steps:
1. Decrypt with `devPeer.ProcessInboundUserMessage(packed)` (takes the full `*PackedUserMessage` — **not** `payload, signature` separately; that API was updated when the sym-encryption + double-ratchet layer was added).
2. Check `usermsg.Type == "device_sync"`.
3. Deserialise `DeviceSyncPayload` from `usermsg.Appdata`.
4. Dedup check: call `IsDeviceSyncSeen(payload.DedupId)`. If yes, skip.
5. Mark seen: `MarkDeviceSyncSeen(payload.DedupId)`.
6. **Persist DR state** — after decryption, if `devPeer.DrRootKey != ""`, call `identity.DeviceStorage.StorePeer(devPeer)` (the Badger-backed device store introduced in §1.5) to persist the updated `DrStateJson`. This mirrors what `ConsumeInboxFile` does for regular peers.
7. Dispatch by `payload.SyncType`:
- `"msg"`: find the local peer by `payload.PeerUid`, call `client.StoreDeviceSyncedMessage(peer, payload.DbMessage)`.
- `"status"`: update the status fields in the existing DB row matched by `payload.DbMessage.Status.Uuid`.
- `"peer_update"`: apply `payload.PeerData` to the local peer record (see Phase 6).
- `"identity_update"`: apply `payload.IdentityData` to the local identity profile (see Phase 6).
#### 4.4 `StoreDeviceSyncedMessage` in `client/messagestorage.go`
A thin wrapper around `storeMessage` that:
- Marks the message as synced (a new bool field `Synced` in `DbMessage`, or use a naming convention in `DbMessage.Appdata`).
- Does **not** trigger a second round of sync dispatch (no re-broadcast).
- Handles absent file paths gracefully (files are not synced, only metadata).
---
### Phase 6 — Peer Metadata and Identity Profile Sync
**Files to touch:** `client/helpers/deviceHelper.go`, `client/helpers/deviceSyncHelper.go`, `client/identity.go`
The goal is to propagate non-message data across sibling devices: peer names/avatars/settings and the identity profile. This is **one-directional fan-out** (whichever device makes the change dispatches to all siblings) — no merge protocol is needed because conflicts are resolved by last-write-wins (the dedupId carries a timestamp or UUID sufficient for dedup; ordering is not guaranteed but is acceptable for profile data).
#### 6.1 Peer metadata sync (`sync_type = "peer_update"`)
Dispatch a `"peer_update"` payload whenever a peer record is meaningfully mutated (name, avatar, notification settings, visibility, blocked state, etc.).
**Payload**: `DeviceSyncPayload.PeerData` is a JSON-encoded **full `Peer` struct**, including all private key material and DR state. This is safe because:
- The device sync channel is E2E-encrypted with the same X25519 + sym + DR stack as peer messages.
- The target server is user-owned; the operator is the user themselves.
- The recipient is the same person on a different device.
Fields included in `PeerData`:
- All keypairs in full: `MyIdentity`, `MyEncryptionKp`, `MyLookupKp` (private + public)
- `MySymKey` — shared symmetric key for that peer's channel
- `DrKpPrivate`, `DrKpPublic`, `DrRootKey`, `DrInitiator`, `ContactDrPublicKey`
- **`DrStateJson`** — current live DR session state (see DR note below)
- All contact keys: `ContactPublicKey`, `ContactEncryption`, `ContactLookupKey`, `ContactPullServers`
- All metadata: `Name`, `Avatar`, `Avatars`, `MyName`, `Visible`, `Blocked`, `MessageNotification`, `SendDeliveryAck`, `SendProcessingAck`, `CallsAllowed`, server lists, etc.
Fields excluded from `PeerData`:
- `dbPassword` — transient in-memory field, never serialised; the receiving device uses its own memory password.
The receiving device upserts the peer into its local `Peers` store. After applying the sync, the sibling device is a full participant in the conversation: it can send and receive messages using the replicated keypairs, has the same DR session state, and monitors the same lookup key queues.
**DR state sync (Phase 6 only)**: Syncing `DrStateJson` as part of `"peer_update"` gives sibling devices a working DR session at the point of pairing and keeps them in sync during normal single-active-device use. Phase 7 supersedes this with independent per-device DR sessions, eliminating all shared-state concerns. If Phase 7 is implemented, the `DrStateJson` field in `PeerData` can be omitted from the sync payload (each device initialises its own fresh session via the device introduction flow).
**New peers**: When Device A completes an invitation with a new contact, it dispatches `"peer_update"` to all siblings with the full peer record. Device B immediately becomes a full participant — same keypairs, same lookup key, same DR session start state — and can transparently send and receive messages with that contact without any secondary invitation.
#### 6.2 Identity profile sync (`sync_type = "identity_update"`)
Dispatch an `"identity_update"` whenever `Identity.Nickname`, `Identity.DefaultAvatar`, `Identity.Avatars`, or `Identity.Status` changes.
**Payload**: `DeviceSyncPayload.IdentityData` is a JSON-encoded subset of `Identity`:
```go
type IdentityProfileSnapshot struct {
Nickname string `json:"nickname"`
DefaultAvatar string `json:"default_avatar"`
Avatars []Avatar `json:"avatars"`
Status string `json:"status"`
}
```
The receiving device deserialises this and updates only the listed fields on its local `Identity`, then calls `identity.Save()`.
**Explicitly NOT synced** in `IdentityData`:
- `RootKp` — the user's root signing keypair is the trust anchor; it should be established once per identity creation and never transmitted, even over a secure channel. Compromise of the root key invalidates the entire identity.
- `Device` — device-specific keypair for server auth; each device has its own.
- `OwnedDevices` — the device mesh itself; managed separately by the pairing flow.
- `HiddenPeers` — sensitive by design; out of scope.
- `DefaultDbPassword`, `DbPasswordStore` — local security preferences.
- `MessageServers` / `Peers` — covered by their own sync types (`"server_add"`, `"peer_update"`).
#### 6.3 Server list sync (future — `sync_type = "server_add"`)
When a new `MessageServer` is added to one device's `MessageServers`, dispatch `"server_add"` so all siblings discover it. Implementation deferred; placeholder `sync_type` reserved.
#### 6.4 Dispatch hooks
- After `Identity.InvitePeer` / `FinalizeInvitation` / any peer metadata update: call `DispatchSyncToDevices(..., "peer_update", peer.Uid, nil, uuid.New().String())`.
- After `Identity.Save()` when profile fields changed: call `DispatchSyncToDevices(..., "identity_update", "", nil, uuid.New().String())`.
---
### Phase 7 — Per-Device DR Sessions (Bullet-proof Forward Secrecy)
**Goal**: Eliminate the concurrent-send DR race without shared ratchet state and without leaking device count to contacts.
#### 7.0 Privacy constraint
The naive per-device DR approach (introduce all devices to all contacts) has a fundamental privacy problem: every contact learns how many devices you own and receives session material for each. This leaks metadata — device count, device rotation events, possibly device fingerprints. This is unacceptable for a privacy-first library.
Two architecturally sound options are described below. **Option B (primary device relay) is recommended** because it preserves complete contact-side opacity and requires no protocol extension on the contact side.
---
#### Option A — Contact-aware per-device sessions (not recommended)
Each device is introduced to all contacts via a `"device_introduce"` message. The contact maintains one independent DR session per device and sends a separate encrypted copy per device on every message.
| Property | Value |
|---|---|
| DR race | ✅ Eliminated |
| Contact privacy | ❌ Contacts learn device count and session keys |
| Contact protocol change | ❌ Required (handle `DeviceInfo` list, multi-destination send) |
| Backward compatibility | ❌ Old clients can't participate |
| Server changes | ✅ None |
This is Signal's model. It is appropriate when contacts are expected to be aware of device multiplicity (e.g. a closed ecosystem). It is **not** appropriate for meowlib's open, privacy-first design.
---
#### Option B — Primary device relay (recommended)
The device that owns the peer relationship (the one whose keypairs are in the `Peer` record — call it the **primary**) is the only device that ever communicates directly with a contact. Its DR session with the contact is singular, unshared, and advances normally.
Sibling devices that want to send a message do so by dispatching a `"forward"` device sync payload to the primary. The primary re-encrypts with the contact's keys and forwards. From the contact's perspective: one sender, one DR session, zero device awareness.
| Property | Value |
|---|---|
| DR race | ✅ Eliminated (only primary drives the DR session) |
| Contact privacy | ✅ Contact is completely unaware of sibling devices |
| Contact protocol change | ✅ None required |
| Backward compatibility | ✅ Full |
| Server changes | ✅ None |
| Trade-off | ⚠️ If primary is offline, sibling outbound messages queue until it returns |
##### 7.1 Primary device designation
The device that completes the invitation flow for a peer (calls `InvitePeer` or `FinalizeInvitation`) is the primary for that peer. The `Peer` record synced to sibling devices carries a `PrimaryDeviceUid string` field (the UID of the device peer that "owns" this peer relationship):
```go
// Add to Peer struct:
PrimaryDeviceUid string `json:"primary_device_uid,omitempty"`
// empty = this device IS the primary for this peer
```
When a sibling device receives a `"peer_update"` sync, it sets `PrimaryDeviceUid` to the sender's device UID. When the primary device sends a peer update, it leaves `PrimaryDeviceUid` empty (it is the primary).
##### 7.2 New sync type: `"forward"`
Add to `DeviceSyncPayload.sync_type`:
```
"forward" — sibling device requests primary to send a message to a peer on its behalf
```
New fields needed in `DeviceSyncPayload`:
```protobuf
bytes forward_payload = 7; // serialized UserMessage (plaintext, will be encrypted by primary)
string forward_peer_uid = 8; // local peer UID on the primary device to forward to
```
##### 7.3 Send path on a sibling device
When a sibling device (one where `peer.PrimaryDeviceUid != ""`) sends a message to peer P:
1. Build the `UserMessage` normally.
2. **Do not** call `peer.ProcessOutboundUserMessage` — the sibling does not have a valid DR state for the contact.
3. Serialize the `UserMessage` (plaintext proto bytes).
4. Build a `DeviceSyncPayload{SyncType: "forward", ForwardPayload: serialized, ForwardPeerUid: peer.Uid}`.
5. Dispatch it to the primary device via the normal device sync send path.
6. Store the message locally with a `"pending_forward"` status so the UI reflects it immediately.
##### 7.4 Receive and forward path on the primary device
When `ConsumeDeviceSyncMessage` on the primary sees `sync_type == "forward"`:
1. Deserialize `ForwardPayload` into a `UserMessage`.
2. Locate the local peer by `ForwardPeerUid`.
3. Call `peer.ProcessOutboundUserMessage(userMessage)` — primary uses its DR session normally.
4. Enqueue a `SendJob` to deliver to the contact's server (same path as any outbound message).
5. Dispatch a `"msg"` sync back to all siblings with the now-stored `DbMessage` so they update the message status from `"pending_forward"` to sent.
##### 7.5 Offline queuing
If the primary device is offline when the sibling dispatches a `"forward"` sync, the sync message sits in the device sync queue on the server (same Redis sorted-set as all device messages). When the primary comes back online and polls, it picks up the forwarded message and delivers it. No message is lost; latency equals the primary's offline window.
##### 7.6 Result
- Zero contact protocol changes. Contacts cannot distinguish a primary-only device from a multi-device user.
- No device count leakage. Device topology is fully opaque to the outside world.
- No DR race. The primary drives a single ratchet per contact.
- No server changes.
- `ProcessOutboundUserMessage` signature stays `(*PackedUserMessage, error)` — no ripple through callers.
- Trade-off is well-bounded: forward latency ≤ primary polling interval, which is already the existing long-poll timeout.
---
### Phase 5 — Dedup Store
**Files to touch:** new `client/devicesyncdedup.go`
A single SQLite DB per identity folder: `{StoragePath}/{IdentityUuid}/devicesync.db`.
Schema:
```sql
CREATE TABLE IF NOT EXISTS seen (
id TEXT NOT NULL PRIMARY KEY,
seen_at INTEGER NOT NULL
);
```
Functions:
```go
func IsDeviceSyncSeen(storagePath, identityUuid, dedupId string) (bool, error)
func MarkDeviceSyncSeen(storagePath, identityUuid, dedupId string) error
func PruneDeviceSyncSeen(storagePath, identityUuid string, olderThan time.Duration) error
```
`PruneDeviceSyncSeen` is called periodically (e.g. weekly) from the background thread to remove entries older than 30 days.
---
## File Change Summary
| File | Change |
|---|---|
| `pb/messages.proto` | Add `DeviceSyncPayload` message (with `peer_data` and `identity_data` fields) |
| `pb/protogen.sh` → re-run | Regenerate `.pb.go` |
| `client/identity.go` | Add `InitDevicePairing`, `AnswerDevicePairing`, `FinalizeDevicePairing`; add `DeviceStorage PeerStorage` field; extend `GetRequestJobs()`; add profile-change dispatch hooks |
| `client/peer.go` | No changes needed (Type field already exists) |
| `client/messagestorage.go` | Add `StoreDeviceSyncedMessage` |
| `client/devicesyncdedup.go` | **New** — dedup SQLite helpers |
| `client/helpers/deviceHelper.go` | **New** — `BuildDeviceSyncMessage`, `DispatchSyncToDevices` (msg + peer_update + identity_update), pairing message helpers |
| `client/helpers/deviceSyncHelper.go` | **New** — `ConsumeDeviceSyncMessage` (handles all sync types) |
| `client/helpers/messageHelper.go` | Add `DispatchSyncToDevices` call after outbound store; detect primary vs sibling role on send |
| `client/helpers/bgPollHelper.go` | Add device message detection in `ConsumeInboxFile` |
| `client/peer.go` | Add `PrimaryDeviceUid string` field; sibling send path dispatches `"forward"` instead of direct send |
| `client/helpers/deviceSyncHelper.go` | Handle `"forward"` sync type: deserialize, re-encrypt, enqueue SendJob, dispatch `"msg"` sync back |
Server package: **no changes required**.
---
## Sync Scope
| Data | Synced | Notes |
|---|---|---|
| Message text / data | ✅ | In `DbMessage.Data` |
| Outbound flag | ✅ | In `DbMessage.Outbound` |
| Message UUID | ✅ | Via `ConversationStatus.Uuid` |
| Sent/received timestamps | ✅ | In `ConversationStatus` |
| File content | ❌ | Not synced; only `FilePaths` metadata synced |
| Peer full keypairs (private + public) | ✅ | Phase 6 — included in `"peer_update"` `PeerData`; channel is E2E-encrypted on user-owned server |
| Peer symmetric key | ✅ | Phase 6 — included in `"peer_update"` `PeerData` |
| Peer DR session state (`DrStateJson`) | ✅ | Phase 6 — synced on peer_update; Phase 7 (Option B) eliminates the need: primary drives one DR session, siblings never touch it |
| Peer metadata (name, avatar, settings) | ✅ | Phase 6 — `"peer_update"` sync type |
| New peer (unknown to sibling) | ✅ | Full peer record synced; sibling becomes immediate full participant |
| Identity profile (nickname, avatar, status) | ✅ | Phase 6 — `"identity_update"` sync type |
| Identity root keypair (`RootKp`) | ❌ | Trust anchor; never transmitted even over secure channel |
| Known/message server list | ⚠️ | Future — `"server_add"` placeholder reserved |
| Hidden peers | ❌ | Hidden by design; out of scope |
| Device keypair | ❌ | Per-device; each device authenticates to servers with its own key |
---
## Privacy Properties
- Device sync messages are end-to-end encrypted (same X25519 + sym + DR stack as peer messages).
- The server sees only the device lookup key as destination; it cannot distinguish sync messages from peer messages.
- Including device lookup keys in batch pull requests does not leak which device belongs to you (same privacy model as multiple peer lookup keys per request).
- `OwnedDevices` peers should be treated as "hidden" (not shown in contact lists) and stored in the device storage, separate from regular peers.
- **Contacts are never made aware of device count or device identity** (Phase 7 Option B). The primary device relay model means the outside world observes exactly one sender per user identity, regardless of how many devices are active.
- The device mesh topology (which devices exist, how many) is known only to the user's own devices, and is carried exclusively over the E2E-encrypted device sync channel on the user-owned server.
---
## Testing Strategy
1. **Unit tests** for `DeviceSyncPayload` serialisation round-trip.
2. **Unit tests** for dedup store (seen/mark/prune lifecycle).
3. **Integration test** extending `TestEndToEnd`:
- Create identity, two device peers (DeviceA, DeviceB).
- Send a message on DeviceA.
- Verify DeviceB's DB contains the synced message after `ConsumeDeviceSyncMessage`.
- Resend the same dedup_id — verify no duplicate row created.
4. **Integration test** for inbound sync:
- DeviceA receives a peer message.
- Verify DeviceB gets the sync and stores it correctly.

1
go.mod
View File

@@ -40,6 +40,7 @@ require (
github.com/onsi/ginkgo v1.16.5 // indirect github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.30.0 // indirect github.com/onsi/gomega v1.30.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/status-im/doubleratchet v3.0.0+incompatible // indirect
github.com/twitchtv/twirp v8.1.3+incompatible // indirect github.com/twitchtv/twirp v8.1.3+incompatible // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect
golang.org/x/crypto v0.41.0 // indirect golang.org/x/crypto v0.41.0 // indirect

2
go.sum
View File

@@ -213,6 +213,8 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/status-im/doubleratchet v3.0.0+incompatible h1:aJ1ejcSERpSzmWZBgtfYtiU2nF0Q8ZkGyuEPYETXkCY=
github.com/status-im/doubleratchet v3.0.0+incompatible/go.mod h1:1sqR0+yhiM/bd+wrdX79AOt2csZuJOni0nUDzKNuqOU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=

View File

@@ -669,7 +669,7 @@ func (x *FromServerMessage) GetContactCard() []*ContactCard {
type MatriochkaServer struct { type MatriochkaServer struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` // Server Url Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` // Server Url
PublicKey string `protobuf:"bytes,2,opt,name=publicKey,proto3" json:"publicKey,omitempty"` // Server Public Key PublicKey string `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` // Server Public Key
Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` // Optional, uuid for delivery confirmation Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` // Optional, uuid for delivery confirmation
Delay int32 `protobuf:"varint,4,opt,name=delay,proto3" json:"delay,omitempty"` // Max delay requested for message forwarding or delivery tracking Delay int32 `protobuf:"varint,4,opt,name=delay,proto3" json:"delay,omitempty"` // Max delay requested for message forwarding or delivery tracking
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
@@ -736,7 +736,7 @@ func (x *MatriochkaServer) GetDelay() int32 {
type Matriochka struct { type Matriochka struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
LookupKey string `protobuf:"bytes,1,opt,name=lookupKey,proto3" json:"lookupKey,omitempty"` // Optional, only if you want delivery tracking, less stealth LookupKey string `protobuf:"bytes,1,opt,name=lookup_key,json=lookupKey,proto3" json:"lookup_key,omitempty"` // Optional, only if you want delivery tracking, less stealth
Prev *MatriochkaServer `protobuf:"bytes,2,opt,name=prev,proto3" json:"prev,omitempty"` // Optional, like above Prev *MatriochkaServer `protobuf:"bytes,2,opt,name=prev,proto3" json:"prev,omitempty"` // Optional, like above
Next *MatriochkaServer `protobuf:"bytes,3,opt,name=next,proto3" json:"next,omitempty"` // Next server to deliver the message to Next *MatriochkaServer `protobuf:"bytes,3,opt,name=next,proto3" json:"next,omitempty"` // Next server to deliver the message to
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // Matriochka data Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // Matriochka data
@@ -902,10 +902,13 @@ type ContactCard struct {
ContactPublicKey string `protobuf:"bytes,2,opt,name=contact_public_key,json=contactPublicKey,proto3" json:"contact_public_key,omitempty"` // contact public key, will be used to authenticate her/his messages ContactPublicKey string `protobuf:"bytes,2,opt,name=contact_public_key,json=contactPublicKey,proto3" json:"contact_public_key,omitempty"` // contact public key, will be used to authenticate her/his messages
EncryptionPublicKey string `protobuf:"bytes,3,opt,name=encryption_public_key,json=encryptionPublicKey,proto3" json:"encryption_public_key,omitempty"` // public key you must use to to write encrypted messages to that contact EncryptionPublicKey string `protobuf:"bytes,3,opt,name=encryption_public_key,json=encryptionPublicKey,proto3" json:"encryption_public_key,omitempty"` // public key you must use to to write encrypted messages to that contact
LookupPublicKey string `protobuf:"bytes,4,opt,name=lookup_public_key,json=lookupPublicKey,proto3" json:"lookup_public_key,omitempty"` // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers LookupPublicKey string `protobuf:"bytes,4,opt,name=lookup_public_key,json=lookupPublicKey,proto3" json:"lookup_public_key,omitempty"` // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers
PullServers []*ServerCard `protobuf:"bytes,5,rep,name=pull_servers,json=pullServers,proto3" json:"pull_servers,omitempty"` // list the servers where the contact will look for messages from you SymetricKey string `protobuf:"bytes,5,opt,name=symetric_key,json=symetricKey,proto3" json:"symetric_key,omitempty"` // agreed key for payload symetric encryption
Version uint32 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"` PullServers []*ServerCard `protobuf:"bytes,6,rep,name=pull_servers,json=pullServers,proto3" json:"pull_servers,omitempty"` // list the servers where the contact will look for messages from you
InvitationId string `protobuf:"bytes,7,opt,name=invitation_id,json=invitationId,proto3" json:"invitation_id,omitempty"` Version uint32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"`
InvitationMessage string `protobuf:"bytes,8,opt,name=invitation_message,json=invitationMessage,proto3" json:"invitation_message,omitempty"` InvitationId string `protobuf:"bytes,8,opt,name=invitation_id,json=invitationId,proto3" json:"invitation_id,omitempty"`
InvitationMessage string `protobuf:"bytes,9,opt,name=invitation_message,json=invitationMessage,proto3" json:"invitation_message,omitempty"`
DrRootKey string `protobuf:"bytes,10,opt,name=dr_root_key,json=drRootKey,proto3" json:"dr_root_key,omitempty"` // DR pre-shared root key (base64, 32 bytes)
DrPublicKey string `protobuf:"bytes,11,opt,name=dr_public_key,json=drPublicKey,proto3" json:"dr_public_key,omitempty"` // DR DH public key of the initiator (base64)
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
@@ -968,6 +971,13 @@ func (x *ContactCard) GetLookupPublicKey() string {
return "" return ""
} }
func (x *ContactCard) GetSymetricKey() string {
if x != nil {
return x.SymetricKey
}
return ""
}
func (x *ContactCard) GetPullServers() []*ServerCard { func (x *ContactCard) GetPullServers() []*ServerCard {
if x != nil { if x != nil {
return x.PullServers return x.PullServers
@@ -996,14 +1006,29 @@ func (x *ContactCard) GetInvitationMessage() string {
return "" return ""
} }
func (x *ContactCard) GetDrRootKey() string {
if x != nil {
return x.DrRootKey
}
return ""
}
func (x *ContactCard) GetDrPublicKey() string {
if x != nil {
return x.DrPublicKey
}
return ""
}
// structure for sending a message to be forwarded to another user in protobuf format // structure for sending a message to be forwarded to another user in protobuf format
type PackedUserMessage struct { type PackedUserMessage struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` // the peer's current conversation lookup public key Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` // the peer's current conversation lookup public key
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` // the message UserMessage encrypted with the destination peer's public key Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` // the message UserMessage encrypted with the destination peer's public key
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` // the payload signature with the client identity private key Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` // the payload signature with the client identity private key
ServerTimestamp []int64 `protobuf:"varint,4,rep,packed,name=serverTimestamp,proto3" json:"serverTimestamp,omitempty"` // server time stamp, might be several in matriochka mode ServerTimestamp []int64 `protobuf:"varint,4,rep,packed,name=server_timestamp,json=serverTimestamp,proto3" json:"server_timestamp,omitempty"` // server time stamp, might be several in matriochka mode
ServerDeliveryUuid string `protobuf:"bytes,5,opt,name=server_delivery_uuid,json=serverDeliveryUuid,proto3" json:"server_delivery_uuid,omitempty"` // message uuid, for server delivery tracking, omitted if not delivery tracking desired ServerDeliveryUuid string `protobuf:"bytes,5,opt,name=server_delivery_uuid,json=serverDeliveryUuid,proto3" json:"server_delivery_uuid,omitempty"` // message uuid, for server delivery tracking, omitted if not delivery tracking desired
DrHeader []byte `protobuf:"bytes,6,opt,name=dr_header,json=drHeader,proto3" json:"dr_header,omitempty"` // serialized doubleratchet MessageHeader; empty = no DR layer
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
@@ -1073,16 +1098,24 @@ func (x *PackedUserMessage) GetServerDeliveryUuid() string {
return "" return ""
} }
func (x *PackedUserMessage) GetDrHeader() []byte {
if x != nil {
return x.DrHeader
}
return nil
}
type ConversationStatus struct { type ConversationStatus struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` // uuid of message, or uuid of related message if uuid_action is not empty
AnswerToUuid string `protobuf:"bytes,2,opt,name=answer_to_uuid,json=answerToUuid,proto3" json:"answer_to_uuid,omitempty"` // message is an answer to another one, specify uuid here Reactions []*Reaction `protobuf:"bytes,2,rep,name=reactions,proto3" json:"reactions,omitempty"` // empty => normal message, 1: receivedack, 2: processedack, 3:reaction
LocalSequence uint64 `protobuf:"varint,3,opt,name=localSequence,proto3" json:"localSequence,omitempty"` // seq number in local conversation for custom reordering ReplyToUuid string `protobuf:"bytes,3,opt,name=reply_to_uuid,json=replyToUuid,proto3" json:"reply_to_uuid,omitempty"` // this message replies to the specified uuid
Sent uint64 `protobuf:"varint,4,opt,name=sent,proto3" json:"sent,omitempty"` // timestamp of the message sent LocalSequence uint64 `protobuf:"varint,4,opt,name=local_sequence,json=localSequence,proto3" json:"local_sequence,omitempty"` // seq number in local conversation for custom reordering
Received uint64 `protobuf:"varint,5,opt,name=received,proto3" json:"received,omitempty"` // timestamp of the message received Sent uint64 `protobuf:"varint,5,opt,name=sent,proto3" json:"sent,omitempty"` // timestamp of the message sent
Processed uint64 `protobuf:"varint,6,opt,name=processed,proto3" json:"processed,omitempty"` // timestamp of the message processed Received uint64 `protobuf:"varint,6,opt,name=received,proto3" json:"received,omitempty"` // timestamp of the message received
MyNextIdentity *ContactCard `protobuf:"bytes,7,opt,name=my_next_identity,json=myNextIdentity,proto3" json:"my_next_identity,omitempty"` Processed uint64 `protobuf:"varint,7,opt,name=processed,proto3" json:"processed,omitempty"` // timestamp of the message processed
PeerNextIdentityAck int32 `protobuf:"varint,8,opt,name=peer_next_identityAck,json=peerNextIdentityAck,proto3" json:"peer_next_identityAck,omitempty"` // version of the new peer accepted id MyNextIdentity *ContactCard `protobuf:"bytes,8,opt,name=my_next_identity,json=myNextIdentity,proto3" json:"my_next_identity,omitempty"`
PeerNextIdentityAck int32 `protobuf:"varint,9,opt,name=peer_next_identity_ack,json=peerNextIdentityAck,proto3" json:"peer_next_identity_ack,omitempty"` // version of the new peer accepted id
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
@@ -1124,9 +1157,16 @@ func (x *ConversationStatus) GetUuid() string {
return "" return ""
} }
func (x *ConversationStatus) GetAnswerToUuid() string { func (x *ConversationStatus) GetReactions() []*Reaction {
if x != nil { if x != nil {
return x.AnswerToUuid return x.Reactions
}
return nil
}
func (x *ConversationStatus) GetReplyToUuid() string {
if x != nil {
return x.ReplyToUuid
} }
return "" return ""
} }
@@ -1173,6 +1213,58 @@ func (x *ConversationStatus) GetPeerNextIdentityAck() int32 {
return 0 return 0
} }
type Reaction struct {
state protoimpl.MessageState `protogen:"open.v1"`
Reaction string `protobuf:"bytes,1,opt,name=reaction,proto3" json:"reaction,omitempty"`
ContactUuid string `protobuf:"bytes,2,opt,name=contact_uuid,json=contactUuid,proto3" json:"contact_uuid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Reaction) Reset() {
*x = Reaction{}
mi := &file_messages_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Reaction) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Reaction) ProtoMessage() {}
func (x *Reaction) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Reaction.ProtoReflect.Descriptor instead.
func (*Reaction) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{13}
}
func (x *Reaction) GetReaction() string {
if x != nil {
return x.Reaction
}
return ""
}
func (x *Reaction) GetContactUuid() string {
if x != nil {
return x.ContactUuid
}
return ""
}
type Group struct { type Group struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
@@ -1183,7 +1275,7 @@ type Group struct {
func (x *Group) Reset() { func (x *Group) Reset() {
*x = Group{} *x = Group{}
mi := &file_messages_proto_msgTypes[13] mi := &file_messages_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1195,7 +1287,7 @@ func (x *Group) String() string {
func (*Group) ProtoMessage() {} func (*Group) ProtoMessage() {}
func (x *Group) ProtoReflect() protoreflect.Message { func (x *Group) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[13] mi := &file_messages_proto_msgTypes[14]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1208,7 +1300,7 @@ func (x *Group) ProtoReflect() protoreflect.Message {
// Deprecated: Use Group.ProtoReflect.Descriptor instead. // Deprecated: Use Group.ProtoReflect.Descriptor instead.
func (*Group) Descriptor() ([]byte, []int) { func (*Group) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{13} return file_messages_proto_rawDescGZIP(), []int{14}
} }
func (x *Group) GetName() string { func (x *Group) GetName() string {
@@ -1234,7 +1326,7 @@ type UserMessage struct {
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
Status *ConversationStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` Status *ConversationStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"`
Contact *ContactCard `protobuf:"bytes,6,opt,name=contact,proto3" json:"contact,omitempty"` Contact *ContactCard `protobuf:"bytes,6,opt,name=contact,proto3" json:"contact,omitempty"`
KnownServers *ServerCard `protobuf:"bytes,7,opt,name=knownServers,proto3" json:"knownServers,omitempty"` KnownServers *ServerCard `protobuf:"bytes,7,opt,name=known_servers,json=knownServers,proto3" json:"known_servers,omitempty"`
Group *Group `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"` Group *Group `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"`
Files []*File `protobuf:"bytes,9,rep,name=files,proto3" json:"files,omitempty"` Files []*File `protobuf:"bytes,9,rep,name=files,proto3" json:"files,omitempty"`
CurrentLocation *Location `protobuf:"bytes,10,opt,name=current_location,json=currentLocation,proto3" json:"current_location,omitempty"` CurrentLocation *Location `protobuf:"bytes,10,opt,name=current_location,json=currentLocation,proto3" json:"current_location,omitempty"`
@@ -1247,7 +1339,7 @@ type UserMessage struct {
func (x *UserMessage) Reset() { func (x *UserMessage) Reset() {
*x = UserMessage{} *x = UserMessage{}
mi := &file_messages_proto_msgTypes[14] mi := &file_messages_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1259,7 +1351,7 @@ func (x *UserMessage) String() string {
func (*UserMessage) ProtoMessage() {} func (*UserMessage) ProtoMessage() {}
func (x *UserMessage) ProtoReflect() protoreflect.Message { func (x *UserMessage) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[14] mi := &file_messages_proto_msgTypes[15]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1272,7 +1364,7 @@ func (x *UserMessage) ProtoReflect() protoreflect.Message {
// Deprecated: Use UserMessage.ProtoReflect.Descriptor instead. // Deprecated: Use UserMessage.ProtoReflect.Descriptor instead.
func (*UserMessage) Descriptor() ([]byte, []int) { func (*UserMessage) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{14} return file_messages_proto_rawDescGZIP(), []int{15}
} }
func (x *UserMessage) GetDestination() string { func (x *UserMessage) GetDestination() string {
@@ -1378,7 +1470,7 @@ type File struct {
func (x *File) Reset() { func (x *File) Reset() {
*x = File{} *x = File{}
mi := &file_messages_proto_msgTypes[15] mi := &file_messages_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1390,7 +1482,7 @@ func (x *File) String() string {
func (*File) ProtoMessage() {} func (*File) ProtoMessage() {}
func (x *File) ProtoReflect() protoreflect.Message { func (x *File) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[15] mi := &file_messages_proto_msgTypes[16]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1403,7 +1495,7 @@ func (x *File) ProtoReflect() protoreflect.Message {
// Deprecated: Use File.ProtoReflect.Descriptor instead. // Deprecated: Use File.ProtoReflect.Descriptor instead.
func (*File) Descriptor() ([]byte, []int) { func (*File) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{15} return file_messages_proto_rawDescGZIP(), []int{16}
} }
func (x *File) GetFilename() string { func (x *File) GetFilename() string {
@@ -1446,7 +1538,7 @@ type Location struct {
func (x *Location) Reset() { func (x *Location) Reset() {
*x = Location{} *x = Location{}
mi := &file_messages_proto_msgTypes[16] mi := &file_messages_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1458,7 +1550,7 @@ func (x *Location) String() string {
func (*Location) ProtoMessage() {} func (*Location) ProtoMessage() {}
func (x *Location) ProtoReflect() protoreflect.Message { func (x *Location) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[16] mi := &file_messages_proto_msgTypes[17]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1471,7 +1563,7 @@ func (x *Location) ProtoReflect() protoreflect.Message {
// Deprecated: Use Location.ProtoReflect.Descriptor instead. // Deprecated: Use Location.ProtoReflect.Descriptor instead.
func (*Location) Descriptor() ([]byte, []int) { func (*Location) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{16} return file_messages_proto_rawDescGZIP(), []int{17}
} }
func (x *Location) GetTime() uint64 { func (x *Location) GetTime() uint64 {
@@ -1515,13 +1607,15 @@ type DbMessage struct {
Appdata []byte `protobuf:"bytes,9,opt,name=appdata,proto3" json:"appdata,omitempty"` Appdata []byte `protobuf:"bytes,9,opt,name=appdata,proto3" json:"appdata,omitempty"`
Invitation *Invitation `protobuf:"bytes,10,opt,name=invitation,proto3" json:"invitation,omitempty"` Invitation *Invitation `protobuf:"bytes,10,opt,name=invitation,proto3" json:"invitation,omitempty"`
From string `protobuf:"bytes,11,opt,name=from,proto3" json:"from,omitempty"` // source peer uid, used when storing group conversations with more than one peer From string `protobuf:"bytes,11,opt,name=from,proto3" json:"from,omitempty"` // source peer uid, used when storing group conversations with more than one peer
ServerDeliveryUuid string `protobuf:"bytes,12,opt,name=server_delivery_uuid,json=serverDeliveryUuid,proto3" json:"server_delivery_uuid,omitempty"` // uuid returned by the server upon delivery
ServerDeliveryTimestamp uint64 `protobuf:"varint,13,opt,name=server_delivery_timestamp,json=serverDeliveryTimestamp,proto3" json:"server_delivery_timestamp,omitempty"` // timestamp of the server delivery
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
func (x *DbMessage) Reset() { func (x *DbMessage) Reset() {
*x = DbMessage{} *x = DbMessage{}
mi := &file_messages_proto_msgTypes[17] mi := &file_messages_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1533,7 +1627,7 @@ func (x *DbMessage) String() string {
func (*DbMessage) ProtoMessage() {} func (*DbMessage) ProtoMessage() {}
func (x *DbMessage) ProtoReflect() protoreflect.Message { func (x *DbMessage) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[17] mi := &file_messages_proto_msgTypes[18]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1546,7 +1640,7 @@ func (x *DbMessage) ProtoReflect() protoreflect.Message {
// Deprecated: Use DbMessage.ProtoReflect.Descriptor instead. // Deprecated: Use DbMessage.ProtoReflect.Descriptor instead.
func (*DbMessage) Descriptor() ([]byte, []int) { func (*DbMessage) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{17} return file_messages_proto_rawDescGZIP(), []int{18}
} }
func (x *DbMessage) GetOutbound() bool { func (x *DbMessage) GetOutbound() bool {
@@ -1626,6 +1720,20 @@ func (x *DbMessage) GetFrom() string {
return "" return ""
} }
func (x *DbMessage) GetServerDeliveryUuid() string {
if x != nil {
return x.ServerDeliveryUuid
}
return ""
}
func (x *DbMessage) GetServerDeliveryTimestamp() uint64 {
if x != nil {
return x.ServerDeliveryTimestamp
}
return 0
}
type VideoData struct { type VideoData struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
@@ -1639,7 +1747,7 @@ type VideoData struct {
func (x *VideoData) Reset() { func (x *VideoData) Reset() {
*x = VideoData{} *x = VideoData{}
mi := &file_messages_proto_msgTypes[18] mi := &file_messages_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1651,7 +1759,7 @@ func (x *VideoData) String() string {
func (*VideoData) ProtoMessage() {} func (*VideoData) ProtoMessage() {}
func (x *VideoData) ProtoReflect() protoreflect.Message { func (x *VideoData) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[18] mi := &file_messages_proto_msgTypes[19]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1664,7 +1772,7 @@ func (x *VideoData) ProtoReflect() protoreflect.Message {
// Deprecated: Use VideoData.ProtoReflect.Descriptor instead. // Deprecated: Use VideoData.ProtoReflect.Descriptor instead.
func (*VideoData) Descriptor() ([]byte, []int) { func (*VideoData) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{18} return file_messages_proto_rawDescGZIP(), []int{19}
} }
func (x *VideoData) GetUrl() string { func (x *VideoData) GetUrl() string {
@@ -1713,7 +1821,7 @@ type VideoCredential struct {
func (x *VideoCredential) Reset() { func (x *VideoCredential) Reset() {
*x = VideoCredential{} *x = VideoCredential{}
mi := &file_messages_proto_msgTypes[19] mi := &file_messages_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -1725,7 +1833,7 @@ func (x *VideoCredential) String() string {
func (*VideoCredential) ProtoMessage() {} func (*VideoCredential) ProtoMessage() {}
func (x *VideoCredential) ProtoReflect() protoreflect.Message { func (x *VideoCredential) ProtoReflect() protoreflect.Message {
mi := &file_messages_proto_msgTypes[19] mi := &file_messages_proto_msgTypes[20]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -1738,7 +1846,7 @@ func (x *VideoCredential) ProtoReflect() protoreflect.Message {
// Deprecated: Use VideoCredential.ProtoReflect.Descriptor instead. // Deprecated: Use VideoCredential.ProtoReflect.Descriptor instead.
func (*VideoCredential) Descriptor() ([]byte, []int) { func (*VideoCredential) Descriptor() ([]byte, []int) {
return file_messages_proto_rawDescGZIP(), []int{19} return file_messages_proto_rawDescGZIP(), []int{20}
} }
func (x *VideoCredential) GetUsername() string { func (x *VideoCredential) GetUsername() string {
@@ -1833,15 +1941,17 @@ const file_messages_proto_rawDesc = "" +
"\n" + "\n" +
"video_data\x18\n" + "video_data\x18\n" +
" \x01(\v2\x12.meowlib.VideoDataR\tvideoData\x127\n" + " \x01(\v2\x12.meowlib.VideoDataR\tvideoData\x127\n" +
"\fcontact_card\x18\v \x03(\v2\x14.meowlib.ContactCardR\vcontactCard\"l\n" + "\fcontact_card\x18\v \x03(\v2\x14.meowlib.ContactCardR\vcontactCard\"m\n" +
"\x10MatriochkaServer\x12\x10\n" + "\x10MatriochkaServer\x12\x10\n" +
"\x03url\x18\x01 \x01(\tR\x03url\x12\x1c\n" + "\x03url\x18\x01 \x01(\tR\x03url\x12\x1d\n" +
"\tpublicKey\x18\x02 \x01(\tR\tpublicKey\x12\x12\n" +
"\x04uuid\x18\x03 \x01(\tR\x04uuid\x12\x14\n" +
"\x05delay\x18\x04 \x01(\x05R\x05delay\"\x9c\x01\n" +
"\n" + "\n" +
"Matriochka\x12\x1c\n" + "public_key\x18\x02 \x01(\tR\tpublicKey\x12\x12\n" +
"\tlookupKey\x18\x01 \x01(\tR\tlookupKey\x12-\n" + "\x04uuid\x18\x03 \x01(\tR\x04uuid\x12\x14\n" +
"\x05delay\x18\x04 \x01(\x05R\x05delay\"\x9d\x01\n" +
"\n" +
"Matriochka\x12\x1d\n" +
"\n" +
"lookup_key\x18\x01 \x01(\tR\tlookupKey\x12-\n" +
"\x04prev\x18\x02 \x01(\v2\x19.meowlib.MatriochkaServerR\x04prev\x12-\n" + "\x04prev\x18\x02 \x01(\v2\x19.meowlib.MatriochkaServerR\x04prev\x12-\n" +
"\x04next\x18\x03 \x01(\v2\x19.meowlib.MatriochkaServerR\x04next\x12\x12\n" + "\x04next\x18\x03 \x01(\v2\x19.meowlib.MatriochkaServerR\x04next\x12\x12\n" +
"\x04data\x18\x04 \x01(\fR\x04data\"\xc3\x01\n" + "\x04data\x18\x04 \x01(\fR\x04data\"\xc3\x01\n" +
@@ -1854,42 +1964,51 @@ const file_messages_proto_rawDesc = "" +
"\x03url\x18\x04 \x01(\tR\x03url\x12\x14\n" + "\x03url\x18\x04 \x01(\tR\x03url\x12\x14\n" +
"\x05login\x18\x05 \x01(\tR\x05login\x12\x1a\n" + "\x05login\x18\x05 \x01(\tR\x05login\x12\x1a\n" +
"\bpassword\x18\x06 \x01(\tR\bpassword\x12\x1c\n" + "\bpassword\x18\x06 \x01(\tR\bpassword\x12\x1c\n" +
"\tsignature\x18\a \x01(\tR\tsignature\"\xd5\x02\n" + "\tsignature\x18\a \x01(\tR\tsignature\"\xbc\x03\n" +
"\vContactCard\x12\x12\n" + "\vContactCard\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12,\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12,\n" +
"\x12contact_public_key\x18\x02 \x01(\tR\x10contactPublicKey\x122\n" + "\x12contact_public_key\x18\x02 \x01(\tR\x10contactPublicKey\x122\n" +
"\x15encryption_public_key\x18\x03 \x01(\tR\x13encryptionPublicKey\x12*\n" + "\x15encryption_public_key\x18\x03 \x01(\tR\x13encryptionPublicKey\x12*\n" +
"\x11lookup_public_key\x18\x04 \x01(\tR\x0flookupPublicKey\x126\n" + "\x11lookup_public_key\x18\x04 \x01(\tR\x0flookupPublicKey\x12!\n" +
"\fpull_servers\x18\x05 \x03(\v2\x13.meowlib.ServerCardR\vpullServers\x12\x18\n" + "\fsymetric_key\x18\x05 \x01(\tR\vsymetricKey\x126\n" +
"\aversion\x18\x06 \x01(\rR\aversion\x12#\n" + "\fpull_servers\x18\x06 \x03(\v2\x13.meowlib.ServerCardR\vpullServers\x12\x18\n" +
"\rinvitation_id\x18\a \x01(\tR\finvitationId\x12-\n" + "\aversion\x18\a \x01(\rR\aversion\x12#\n" +
"\x12invitation_message\x18\b \x01(\tR\x11invitationMessage\"\xc9\x01\n" + "\rinvitation_id\x18\b \x01(\tR\finvitationId\x12-\n" +
"\x12invitation_message\x18\t \x01(\tR\x11invitationMessage\x12\x1e\n" +
"\vdr_root_key\x18\n" +
" \x01(\tR\tdrRootKey\x12\"\n" +
"\rdr_public_key\x18\v \x01(\tR\vdrPublicKey\"\xe7\x01\n" +
"\x11PackedUserMessage\x12 \n" + "\x11PackedUserMessage\x12 \n" +
"\vdestination\x18\x01 \x01(\tR\vdestination\x12\x18\n" + "\vdestination\x18\x01 \x01(\tR\vdestination\x12\x18\n" +
"\apayload\x18\x02 \x01(\fR\apayload\x12\x1c\n" + "\apayload\x18\x02 \x01(\fR\apayload\x12\x1c\n" +
"\tsignature\x18\x03 \x01(\fR\tsignature\x12(\n" + "\tsignature\x18\x03 \x01(\fR\tsignature\x12)\n" +
"\x0fserverTimestamp\x18\x04 \x03(\x03R\x0fserverTimestamp\x120\n" + "\x10server_timestamp\x18\x04 \x03(\x03R\x0fserverTimestamp\x120\n" +
"\x14server_delivery_uuid\x18\x05 \x01(\tR\x12serverDeliveryUuid\"\xb6\x02\n" + "\x14server_delivery_uuid\x18\x05 \x01(\tR\x12serverDeliveryUuid\x12\x1b\n" +
"\tdr_header\x18\x06 \x01(\fR\bdrHeader\"\xe7\x02\n" +
"\x12ConversationStatus\x12\x12\n" + "\x12ConversationStatus\x12\x12\n" +
"\x04uuid\x18\x01 \x01(\tR\x04uuid\x12$\n" + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12/\n" +
"\x0eanswer_to_uuid\x18\x02 \x01(\tR\fanswerToUuid\x12$\n" + "\treactions\x18\x02 \x03(\v2\x11.meowlib.ReactionR\treactions\x12\"\n" +
"\rlocalSequence\x18\x03 \x01(\x04R\rlocalSequence\x12\x12\n" + "\rreply_to_uuid\x18\x03 \x01(\tR\vreplyToUuid\x12%\n" +
"\x04sent\x18\x04 \x01(\x04R\x04sent\x12\x1a\n" + "\x0elocal_sequence\x18\x04 \x01(\x04R\rlocalSequence\x12\x12\n" +
"\breceived\x18\x05 \x01(\x04R\breceived\x12\x1c\n" + "\x04sent\x18\x05 \x01(\x04R\x04sent\x12\x1a\n" +
"\tprocessed\x18\x06 \x01(\x04R\tprocessed\x12>\n" + "\breceived\x18\x06 \x01(\x04R\breceived\x12\x1c\n" +
"\x10my_next_identity\x18\a \x01(\v2\x14.meowlib.ContactCardR\x0emyNextIdentity\x122\n" + "\tprocessed\x18\a \x01(\x04R\tprocessed\x12>\n" +
"\x15peer_next_identityAck\x18\b \x01(\x05R\x13peerNextIdentityAck\"K\n" + "\x10my_next_identity\x18\b \x01(\v2\x14.meowlib.ContactCardR\x0emyNextIdentity\x123\n" +
"\x16peer_next_identity_ack\x18\t \x01(\x05R\x13peerNextIdentityAck\"I\n" +
"\bReaction\x12\x1a\n" +
"\breaction\x18\x01 \x01(\tR\breaction\x12!\n" +
"\fcontact_uuid\x18\x02 \x01(\tR\vcontactUuid\"K\n" +
"\x05Group\x12\x12\n" + "\x05Group\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12.\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12.\n" +
"\amembers\x18\x02 \x03(\v2\x14.meowlib.ContactCardR\amembers\"\x94\x04\n" + "\amembers\x18\x02 \x03(\v2\x14.meowlib.ContactCardR\amembers\"\x95\x04\n" +
"\vUserMessage\x12 \n" + "\vUserMessage\x12 \n" +
"\vdestination\x18\x01 \x01(\tR\vdestination\x12\x12\n" + "\vdestination\x18\x01 \x01(\tR\vdestination\x12\x12\n" +
"\x04from\x18\x02 \x01(\tR\x04from\x12\x12\n" + "\x04from\x18\x02 \x01(\tR\x04from\x12\x12\n" +
"\x04type\x18\x03 \x01(\tR\x04type\x12\x12\n" + "\x04type\x18\x03 \x01(\tR\x04type\x12\x12\n" +
"\x04data\x18\x04 \x01(\fR\x04data\x123\n" + "\x04data\x18\x04 \x01(\fR\x04data\x123\n" +
"\x06status\x18\x05 \x01(\v2\x1b.meowlib.ConversationStatusR\x06status\x12.\n" + "\x06status\x18\x05 \x01(\v2\x1b.meowlib.ConversationStatusR\x06status\x12.\n" +
"\acontact\x18\x06 \x01(\v2\x14.meowlib.ContactCardR\acontact\x127\n" + "\acontact\x18\x06 \x01(\v2\x14.meowlib.ContactCardR\acontact\x128\n" +
"\fknownServers\x18\a \x01(\v2\x13.meowlib.ServerCardR\fknownServers\x12$\n" + "\rknown_servers\x18\a \x01(\v2\x13.meowlib.ServerCardR\fknownServers\x12$\n" +
"\x05group\x18\b \x01(\v2\x0e.meowlib.GroupR\x05group\x12#\n" + "\x05group\x18\b \x01(\v2\x0e.meowlib.GroupR\x05group\x12#\n" +
"\x05files\x18\t \x03(\v2\r.meowlib.FileR\x05files\x12<\n" + "\x05files\x18\t \x03(\v2\r.meowlib.FileR\x05files\x12<\n" +
"\x10current_location\x18\n" + "\x10current_location\x18\n" +
@@ -1909,7 +2028,7 @@ const file_messages_proto_rawDesc = "" +
"\x04time\x18\x01 \x01(\x04R\x04time\x12\x1a\n" + "\x04time\x18\x01 \x01(\x04R\x04time\x12\x1a\n" +
"\blatitude\x18\x02 \x01(\x02R\blatitude\x12\x1c\n" + "\blatitude\x18\x02 \x01(\x02R\blatitude\x12\x1c\n" +
"\tlongitude\x18\x03 \x01(\x02R\tlongitude\x12\x1a\n" + "\tlongitude\x18\x03 \x01(\x02R\tlongitude\x12\x1a\n" +
"\baltitude\x18\x04 \x01(\x05R\baltitude\"\x9a\x03\n" + "\baltitude\x18\x04 \x01(\x05R\baltitude\"\x88\x04\n" +
"\tDbMessage\x12\x1a\n" + "\tDbMessage\x12\x1a\n" +
"\boutbound\x18\x01 \x01(\bR\boutbound\x12\x12\n" + "\boutbound\x18\x01 \x01(\bR\boutbound\x12\x12\n" +
"\x04type\x18\x02 \x01(\tR\x04type\x12\x12\n" + "\x04type\x18\x02 \x01(\tR\x04type\x12\x12\n" +
@@ -1925,7 +2044,9 @@ const file_messages_proto_rawDesc = "" +
"invitation\x18\n" + "invitation\x18\n" +
" \x01(\v2\x13.meowlib.InvitationR\n" + " \x01(\v2\x13.meowlib.InvitationR\n" +
"invitation\x12\x12\n" + "invitation\x12\x12\n" +
"\x04from\x18\v \x01(\tR\x04from\"\xaa\x01\n" + "\x04from\x18\v \x01(\tR\x04from\x120\n" +
"\x14server_delivery_uuid\x18\f \x01(\tR\x12serverDeliveryUuid\x12:\n" +
"\x19server_delivery_timestamp\x18\r \x01(\x04R\x17serverDeliveryTimestamp\"\xaa\x01\n" +
"\tVideoData\x12\x10\n" + "\tVideoData\x12\x10\n" +
"\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n" + "\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n" +
"\x04room\x18\x02 \x01(\tR\x04room\x12\x1a\n" + "\x04room\x18\x02 \x01(\tR\x04room\x12\x1a\n" +
@@ -1951,7 +2072,7 @@ func file_messages_proto_rawDescGZIP() []byte {
return file_messages_proto_rawDescData return file_messages_proto_rawDescData
} }
var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
var file_messages_proto_goTypes = []any{ var file_messages_proto_goTypes = []any{
(*PackedServerMessage)(nil), // 0: meowlib.PackedServerMessage (*PackedServerMessage)(nil), // 0: meowlib.PackedServerMessage
(*Invitation)(nil), // 1: meowlib.Invitation (*Invitation)(nil), // 1: meowlib.Invitation
@@ -1966,13 +2087,14 @@ var file_messages_proto_goTypes = []any{
(*ContactCard)(nil), // 10: meowlib.ContactCard (*ContactCard)(nil), // 10: meowlib.ContactCard
(*PackedUserMessage)(nil), // 11: meowlib.PackedUserMessage (*PackedUserMessage)(nil), // 11: meowlib.PackedUserMessage
(*ConversationStatus)(nil), // 12: meowlib.ConversationStatus (*ConversationStatus)(nil), // 12: meowlib.ConversationStatus
(*Group)(nil), // 13: meowlib.Group (*Reaction)(nil), // 13: meowlib.Reaction
(*UserMessage)(nil), // 14: meowlib.UserMessage (*Group)(nil), // 14: meowlib.Group
(*File)(nil), // 15: meowlib.File (*UserMessage)(nil), // 15: meowlib.UserMessage
(*Location)(nil), // 16: meowlib.Location (*File)(nil), // 16: meowlib.File
(*DbMessage)(nil), // 17: meowlib.DbMessage (*Location)(nil), // 17: meowlib.Location
(*VideoData)(nil), // 18: meowlib.VideoData (*DbMessage)(nil), // 18: meowlib.DbMessage
(*VideoCredential)(nil), // 19: meowlib.VideoCredential (*VideoData)(nil), // 19: meowlib.VideoData
(*VideoCredential)(nil), // 20: meowlib.VideoCredential
} }
var file_messages_proto_depIdxs = []int32{ var file_messages_proto_depIdxs = []int32{
10, // 0: meowlib.Meet.contact_card:type_name -> meowlib.ContactCard 10, // 0: meowlib.Meet.contact_card:type_name -> meowlib.ContactCard
@@ -1982,38 +2104,39 @@ var file_messages_proto_depIdxs = []int32{
8, // 4: meowlib.ToServerMessage.matriochka_message:type_name -> meowlib.Matriochka 8, // 4: meowlib.ToServerMessage.matriochka_message:type_name -> meowlib.Matriochka
1, // 5: meowlib.ToServerMessage.invitation:type_name -> meowlib.Invitation 1, // 5: meowlib.ToServerMessage.invitation:type_name -> meowlib.Invitation
11, // 6: meowlib.ToServerMessage.device_messages:type_name -> meowlib.PackedUserMessage 11, // 6: meowlib.ToServerMessage.device_messages:type_name -> meowlib.PackedUserMessage
18, // 7: meowlib.ToServerMessage.video_data:type_name -> meowlib.VideoData 19, // 7: meowlib.ToServerMessage.video_data:type_name -> meowlib.VideoData
4, // 8: meowlib.ToServerMessage.credentials:type_name -> meowlib.Credentials 4, // 8: meowlib.ToServerMessage.credentials:type_name -> meowlib.Credentials
11, // 9: meowlib.FromServerMessage.chat:type_name -> meowlib.PackedUserMessage 11, // 9: meowlib.FromServerMessage.chat:type_name -> meowlib.PackedUserMessage
9, // 10: meowlib.FromServerMessage.known_servers:type_name -> meowlib.ServerCard 9, // 10: meowlib.FromServerMessage.known_servers:type_name -> meowlib.ServerCard
1, // 11: meowlib.FromServerMessage.invitation:type_name -> meowlib.Invitation 1, // 11: meowlib.FromServerMessage.invitation:type_name -> meowlib.Invitation
11, // 12: meowlib.FromServerMessage.device_messages:type_name -> meowlib.PackedUserMessage 11, // 12: meowlib.FromServerMessage.device_messages:type_name -> meowlib.PackedUserMessage
18, // 13: meowlib.FromServerMessage.video_data:type_name -> meowlib.VideoData 19, // 13: meowlib.FromServerMessage.video_data:type_name -> meowlib.VideoData
10, // 14: meowlib.FromServerMessage.contact_card:type_name -> meowlib.ContactCard 10, // 14: meowlib.FromServerMessage.contact_card:type_name -> meowlib.ContactCard
7, // 15: meowlib.Matriochka.prev:type_name -> meowlib.MatriochkaServer 7, // 15: meowlib.Matriochka.prev:type_name -> meowlib.MatriochkaServer
7, // 16: meowlib.Matriochka.next:type_name -> meowlib.MatriochkaServer 7, // 16: meowlib.Matriochka.next:type_name -> meowlib.MatriochkaServer
9, // 17: meowlib.ContactCard.pull_servers:type_name -> meowlib.ServerCard 9, // 17: meowlib.ContactCard.pull_servers:type_name -> meowlib.ServerCard
10, // 18: meowlib.ConversationStatus.my_next_identity:type_name -> meowlib.ContactCard 13, // 18: meowlib.ConversationStatus.reactions:type_name -> meowlib.Reaction
10, // 19: meowlib.Group.members:type_name -> meowlib.ContactCard 10, // 19: meowlib.ConversationStatus.my_next_identity:type_name -> meowlib.ContactCard
12, // 20: meowlib.UserMessage.status:type_name -> meowlib.ConversationStatus 10, // 20: meowlib.Group.members:type_name -> meowlib.ContactCard
10, // 21: meowlib.UserMessage.contact:type_name -> meowlib.ContactCard 12, // 21: meowlib.UserMessage.status:type_name -> meowlib.ConversationStatus
9, // 22: meowlib.UserMessage.knownServers:type_name -> meowlib.ServerCard 10, // 22: meowlib.UserMessage.contact:type_name -> meowlib.ContactCard
13, // 23: meowlib.UserMessage.group:type_name -> meowlib.Group 9, // 23: meowlib.UserMessage.known_servers:type_name -> meowlib.ServerCard
15, // 24: meowlib.UserMessage.files:type_name -> meowlib.File 14, // 24: meowlib.UserMessage.group:type_name -> meowlib.Group
16, // 25: meowlib.UserMessage.current_location:type_name -> meowlib.Location 16, // 25: meowlib.UserMessage.files:type_name -> meowlib.File
1, // 26: meowlib.UserMessage.invitation:type_name -> meowlib.Invitation 17, // 26: meowlib.UserMessage.current_location:type_name -> meowlib.Location
18, // 27: meowlib.UserMessage.video_data:type_name -> meowlib.VideoData 1, // 27: meowlib.UserMessage.invitation:type_name -> meowlib.Invitation
12, // 28: meowlib.DbMessage.status:type_name -> meowlib.ConversationStatus 19, // 28: meowlib.UserMessage.video_data:type_name -> meowlib.VideoData
10, // 29: meowlib.DbMessage.contact:type_name -> meowlib.ContactCard 12, // 29: meowlib.DbMessage.status:type_name -> meowlib.ConversationStatus
13, // 30: meowlib.DbMessage.group:type_name -> meowlib.Group 10, // 30: meowlib.DbMessage.contact:type_name -> meowlib.ContactCard
16, // 31: meowlib.DbMessage.current_location:type_name -> meowlib.Location 14, // 31: meowlib.DbMessage.group:type_name -> meowlib.Group
1, // 32: meowlib.DbMessage.invitation:type_name -> meowlib.Invitation 17, // 32: meowlib.DbMessage.current_location:type_name -> meowlib.Location
19, // 33: meowlib.VideoData.credentials:type_name -> meowlib.VideoCredential 1, // 33: meowlib.DbMessage.invitation:type_name -> meowlib.Invitation
34, // [34:34] is the sub-list for method output_type 20, // 34: meowlib.VideoData.credentials:type_name -> meowlib.VideoCredential
34, // [34:34] is the sub-list for method input_type 35, // [35:35] is the sub-list for method output_type
34, // [34:34] is the sub-list for extension type_name 35, // [35:35] is the sub-list for method input_type
34, // [34:34] is the sub-list for extension extendee 35, // [35:35] is the sub-list for extension type_name
0, // [0:34] is the sub-list for field type_name 35, // [35:35] is the sub-list for extension extendee
0, // [0:35] is the sub-list for field type_name
} }
func init() { file_messages_proto_init() } func init() { file_messages_proto_init() }
@@ -2027,7 +2150,7 @@ func file_messages_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_messages_proto_rawDesc), len(file_messages_proto_rawDesc)), RawDescriptor: unsafe.Slice(unsafe.StringData(file_messages_proto_rawDesc), len(file_messages_proto_rawDesc)),
NumEnums: 0, NumEnums: 0,
NumMessages: 20, NumMessages: 21,
NumExtensions: 0, NumExtensions: 0,
NumServices: 0, NumServices: 0,
}, },

View File

@@ -30,7 +30,7 @@ message Invitation {
string uuid = 6; // id that the friend gave you, that you should include to your reply to get recognized string uuid = 6; // id that the friend gave you, that you should include to your reply to get recognized
int64 expiry = 7; // the server allowed expiry date, it may be samller than the requested timeout according to server policy int64 expiry = 7; // the server allowed expiry date, it may be samller than the requested timeout according to server policy
int32 step = 8; // progress in the inviattion process : 1=invite friend, 2=friend requests invitation, 3=friend's answer int32 step = 8; // progress in the inviattion process : 1=invite friend, 2=friend requests invitation, 3=friend's answer
string from=9; // used in step 3 the answer public key to check the signature in user message string from = 9; // used in step 3 the answer public key to check the signature in user message
} }
// structure for requesting incoming messages // structure for requesting incoming messages
@@ -81,7 +81,6 @@ message ToServerMessage {
Credentials credentials = 13; // credentials for a new user or mandatory server creds Credentials credentials = 13; // credentials for a new user or mandatory server creds
} }
// structure defining a from server receiver message decrypted from a "packedmessage" payload // structure defining a from server receiver message decrypted from a "packedmessage" payload
message FromServerMessage { message FromServerMessage {
string type = 1; // Type string type = 1; // Type
@@ -105,13 +104,13 @@ message FromServerMessage {
message MatriochkaServer { message MatriochkaServer {
string url = 1; // Server Url string url = 1; // Server Url
string publicKey = 2; // Server Public Key string public_key = 2; // Server Public Key
string uuid = 3 ; // Optional, uuid for delivery confirmation string uuid = 3 ; // Optional, uuid for delivery confirmation
int32 delay = 4; // Max delay requested for message forwarding or delivery tracking int32 delay = 4; // Max delay requested for message forwarding or delivery tracking
} }
message Matriochka { message Matriochka {
string lookupKey = 1; // Optional, only if you want delivery tracking, less stealth string lookup_key = 1; // Optional, only if you want delivery tracking, less stealth
MatriochkaServer prev = 2; // Optional, like above MatriochkaServer prev = 2; // Optional, like above
MatriochkaServer next = 3; // Next server to deliver the message to MatriochkaServer next = 3; // Next server to deliver the message to
bytes data = 4; // Matriochka data bytes data = 4; // Matriochka data
@@ -120,51 +119,61 @@ message Matriochka {
// structure describing required server attributes // structure describing required server attributes
message ServerCard { message ServerCard {
string name = 1; // friendly server name string name = 1; // friendly server name
string description=2; // description : owner type (company/private/university...), string description = 2; // description : owner type (company/private/university...),
string public_key = 3; // public key you must use to send encrypted messages to that server string public_key = 3; // public key you must use to send encrypted messages to that server
string url = 4; // meow server url string url = 4; // meow server url
string login = 5; // required login to access the server string login = 5; // required login to access the server
string password = 6; // password associated to the login string password = 6; // password associated to the login
string signature = 7; // signature of all previous fields by the server itself string signature = 7; // signature of all previous fields by the server itself
} }
// structure describing a user contact card ie the minimum set of attributes for exchanging identities // structure describing a user contact card ie the minimum set of attributes for exchanging identities
message ContactCard { message ContactCard {
string name=1; // contact nickname string name = 1; // contact nickname
string contact_public_key =2; // contact public key, will be used to authenticate her/his messages string contact_public_key = 2; // contact public key, will be used to authenticate her/his messages
string encryption_public_key= 3; // public key you must use to to write encrypted messages to that contact string encryption_public_key = 3; // public key you must use to to write encrypted messages to that contact
string lookup_public_key =4; // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers string lookup_public_key = 4; // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers
repeated ServerCard pull_servers =5; // list the servers where the contact will look for messages from you string symetric_key = 5; // agreed key for payload symetric encryption
uint32 version = 6; repeated ServerCard pull_servers = 6; // list the servers where the contact will look for messages from you
string invitation_id=7; uint32 version = 7;
string invitation_message=8; string invitation_id = 8;
string invitation_message = 9;
string dr_root_key = 10; // DR pre-shared root key (base64, 32 bytes)
string dr_public_key = 11; // DR DH public key of the initiator (base64)
} }
// structure for sending a message to be forwarded to another user in protobuf format // structure for sending a message to be forwarded to another user in protobuf format
message PackedUserMessage { message PackedUserMessage {
string destination=1; // the peer's current conversation lookup public key string destination = 1; // the peer's current conversation lookup public key
bytes payload=2; // the message UserMessage encrypted with the destination peer's public key bytes payload = 2; // the message UserMessage encrypted with the destination peer's public key
bytes signature=3; // the payload signature with the client identity private key bytes signature = 3; // the payload signature with the client identity private key
repeated int64 serverTimestamp=4; // server time stamp, might be several in matriochka mode repeated int64 server_timestamp = 4; // server time stamp, might be several in matriochka mode
string server_delivery_uuid=5; // message uuid, for server delivery tracking, omitted if not delivery tracking desired string server_delivery_uuid = 5; // message uuid, for server delivery tracking, omitted if not delivery tracking desired
bytes dr_header = 6; // serialized doubleratchet MessageHeader; empty = no DR layer
} }
message ConversationStatus { message ConversationStatus {
string uuid = 1; string uuid = 1; // uuid of message, or uuid of related message if uuid_action is not empty
string answer_to_uuid=2; // message is an answer to another one, specify uuid here repeated Reaction reactions = 2; // reaction to the message per peer
uint64 localSequence = 3 ; // seq number in local conversation for custom reordering string reply_to_uuid = 3; // this message replies to the specified uuid
uint64 sent = 4 ; // timestamp of the message sent uint64 local_sequence = 4 ; // seq number in local conversation for custom reordering
uint64 received = 5; // timestamp of the message received uint64 sent = 5 ; // timestamp of the message sent
uint64 processed = 6; // timestamp of the message processed uint64 received = 6; // timestamp of the message received
ContactCard my_next_identity = 7; uint64 processed = 7; // timestamp of the message processed
int32 peer_next_identityAck = 8; // version of the new peer accepted id ContactCard my_next_identity = 8;
} int32 peer_next_identity_ack = 9; // version of the new peer accepted id
message Group{
string name=1;
repeated ContactCard members = 2;
} }
message Reaction {
string reaction = 1;
string contact_uuid = 2;
}
message Group{
string name = 1;
repeated ContactCard members = 2;
}
// structure defining information that might be exchanged between two peers. // structure defining information that might be exchanged between two peers.
message UserMessage { message UserMessage {
@@ -174,7 +183,7 @@ message UserMessage {
bytes data = 4; bytes data = 4;
ConversationStatus status = 5; ConversationStatus status = 5;
ContactCard contact = 6; ContactCard contact = 6;
ServerCard knownServers = 7; ServerCard known_servers = 7;
Group group = 8; Group group = 8;
repeated File files = 9; repeated File files = 9;
Location current_location = 10; Location current_location = 10;
@@ -190,19 +199,18 @@ message UserMessage {
// 4 : location request // 4 : location request
// 5 : location response // 5 : location response
message File { message File {
string filename=1; // the proposed filename string filename = 1; // the proposed filename
uint64 size=2; // the file size uint64 size = 2; // the file size
uint32 chunk=3; // the chunk counter if file is sent by chunks uint32 chunk = 3; // the chunk counter if file is sent by chunks
bytes data=4; // the file/chunk content bytes data = 4; // the file/chunk content
} }
message Location { message Location {
uint64 time=1; uint64 time = 1;
float latitude=2; float latitude = 2;
float longitude=3; float longitude = 3;
int32 altitude=4; int32 altitude = 4;
} }
message DbMessage { message DbMessage {
@@ -217,6 +225,8 @@ message DbMessage {
bytes appdata = 9; bytes appdata = 9;
Invitation invitation = 10; Invitation invitation = 10;
string from = 11; // source peer uid, used when storing group conversations with more than one peer string from = 11; // source peer uid, used when storing group conversations with more than one peer
string server_delivery_uuid = 12; // uuid returned by the server upon delivery
uint64 server_delivery_timestamp = 13; // timestamp of the server delivery
} }
message VideoData { message VideoData {