Compare commits
23 Commits
e9cc5b9c76
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d23ab73cf9 | ||
|
|
f6531e344e | ||
|
|
32cc9ff848 | ||
|
|
f4fb42d72e | ||
|
|
c0dcfe997c | ||
|
|
5748ead926 | ||
|
|
14a07dcb5c | ||
|
|
fab5818ec7 | ||
|
|
8836d5c591 | ||
|
|
0fdf5dd9c7 | ||
|
|
7d06f0ff3e | ||
|
|
b722a916a9 | ||
|
|
cd9ee54f6d | ||
|
|
67823237e6 | ||
|
|
aa91d2cc0f | ||
|
|
66a6674a6a | ||
|
|
a322f3fccf | ||
|
|
e6f9bc796e | ||
|
|
f76213d55a | ||
|
|
aeeebf6f58 | ||
|
|
423c5d6d64 | ||
|
|
eb7fdc9b03 | ||
|
|
cfa20861c5 |
@@ -20,6 +20,8 @@ func DbMessageToInternalUserMessage(id int64, dbFile string, dbm *meowlib.DbMess
|
||||
ium.Messagetype = dbm.Type
|
||||
ium.Appdata = dbm.Appdata
|
||||
ium.FilePaths = dbm.FilePaths
|
||||
ium.ServerDeliveryUuid = dbm.ServerDeliveryUuid
|
||||
ium.ServerDeliveryTimestamp = dbm.ServerDeliveryTimestamp
|
||||
return &ium
|
||||
}
|
||||
|
||||
@@ -33,6 +35,8 @@ func InternalUserMessageToDbMessage(ium *InternalUserMessage) *meowlib.DbMessage
|
||||
dbm.CurrentLocation = ium.CurrentLocation
|
||||
dbm.Status = ium.Status
|
||||
dbm.FilePaths = ium.FilePaths
|
||||
dbm.ServerDeliveryUuid = ium.ServerDeliveryUuid
|
||||
dbm.ServerDeliveryTimestamp = ium.ServerDeliveryTimestamp
|
||||
return &dbm
|
||||
}
|
||||
|
||||
|
||||
174
client/drsession.go
Normal file
174
client/drsession.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
|
||||
doubleratchet "github.com/status-im/doubleratchet"
|
||||
)
|
||||
|
||||
// drLocalPair implements doubleratchet.DHPair using raw byte slices.
|
||||
type drLocalPair struct {
|
||||
priv doubleratchet.Key
|
||||
pub doubleratchet.Key
|
||||
}
|
||||
|
||||
func (p drLocalPair) PrivateKey() doubleratchet.Key { return p.priv }
|
||||
func (p drLocalPair) PublicKey() doubleratchet.Key { return p.pub }
|
||||
|
||||
// serializedDRState is an intermediate JSON-friendly representation of doubleratchet.State.
|
||||
type serializedDRState struct {
|
||||
DHrPublic []byte `json:"dhr_pub"`
|
||||
DHsPrivate []byte `json:"dhs_priv"`
|
||||
DHsPublic []byte `json:"dhs_pub"`
|
||||
RootChCK []byte `json:"root_ch_ck"`
|
||||
SendChCK []byte `json:"send_ch_ck"`
|
||||
SendChN uint32 `json:"send_ch_n"`
|
||||
RecvChCK []byte `json:"recv_ch_ck"`
|
||||
RecvChN uint32 `json:"recv_ch_n"`
|
||||
PN uint32 `json:"pn"`
|
||||
MkSkipped map[string]map[uint][]byte `json:"mk_skipped"`
|
||||
MaxSkip uint `json:"max_skip"`
|
||||
MaxKeep uint `json:"max_keep"`
|
||||
MaxMessageKeysPerSession int `json:"max_mks_per_session"`
|
||||
Step uint `json:"step"`
|
||||
KeysCount uint `json:"keys_count"`
|
||||
}
|
||||
|
||||
// drSessionStorage implements doubleratchet.SessionStorage, persisting state into peer.DrStateJson.
|
||||
type drSessionStorage struct{ peer *Peer }
|
||||
|
||||
func (s *drSessionStorage) Save(id []byte, state *doubleratchet.State) error {
|
||||
all, err := state.MkSkipped.All()
|
||||
if err != nil {
|
||||
return fmt.Errorf("drSessionStorage.Save: MkSkipped.All: %w", err)
|
||||
}
|
||||
mkSkipped := make(map[string]map[uint][]byte, len(all))
|
||||
for k, msgs := range all {
|
||||
inner := make(map[uint][]byte, len(msgs))
|
||||
for num, mk := range msgs {
|
||||
inner[num] = []byte(mk)
|
||||
}
|
||||
mkSkipped[k] = inner
|
||||
}
|
||||
|
||||
ss := serializedDRState{
|
||||
DHrPublic: []byte(state.DHr),
|
||||
DHsPrivate: []byte(state.DHs.PrivateKey()),
|
||||
DHsPublic: []byte(state.DHs.PublicKey()),
|
||||
RootChCK: []byte(state.RootCh.CK),
|
||||
SendChCK: []byte(state.SendCh.CK),
|
||||
SendChN: state.SendCh.N,
|
||||
RecvChCK: []byte(state.RecvCh.CK),
|
||||
RecvChN: state.RecvCh.N,
|
||||
PN: state.PN,
|
||||
MkSkipped: mkSkipped,
|
||||
MaxSkip: state.MaxSkip,
|
||||
MaxKeep: state.MaxKeep,
|
||||
MaxMessageKeysPerSession: state.MaxMessageKeysPerSession,
|
||||
Step: state.Step,
|
||||
KeysCount: state.KeysCount,
|
||||
}
|
||||
|
||||
b, err := json.Marshal(ss)
|
||||
if err != nil {
|
||||
return fmt.Errorf("drSessionStorage.Save: json.Marshal: %w", err)
|
||||
}
|
||||
s.peer.DrStateJson = string(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *drSessionStorage) Load(id []byte) (*doubleratchet.State, error) {
|
||||
if s.peer.DrStateJson == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var ss serializedDRState
|
||||
if err := json.Unmarshal([]byte(s.peer.DrStateJson), &ss); err != nil {
|
||||
return nil, fmt.Errorf("drSessionStorage.Load: json.Unmarshal: %w", err)
|
||||
}
|
||||
|
||||
c := doubleratchet.DefaultCrypto{}
|
||||
mkStorage := &doubleratchet.KeysStorageInMemory{}
|
||||
seq := uint(0)
|
||||
for k, msgs := range ss.MkSkipped {
|
||||
pubKey, err := hex.DecodeString(k)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("drSessionStorage.Load: decode skipped key hex: %w", err)
|
||||
}
|
||||
for num, mk := range msgs {
|
||||
if err := mkStorage.Put(id, doubleratchet.Key(pubKey), num, doubleratchet.Key(mk), seq); err != nil {
|
||||
return nil, fmt.Errorf("drSessionStorage.Load: Put: %w", err)
|
||||
}
|
||||
seq++
|
||||
}
|
||||
}
|
||||
|
||||
state := &doubleratchet.State{
|
||||
Crypto: c,
|
||||
DHr: doubleratchet.Key(ss.DHrPublic),
|
||||
DHs: drLocalPair{priv: doubleratchet.Key(ss.DHsPrivate), pub: doubleratchet.Key(ss.DHsPublic)},
|
||||
PN: ss.PN,
|
||||
MkSkipped: mkStorage,
|
||||
MaxSkip: ss.MaxSkip,
|
||||
MaxKeep: ss.MaxKeep,
|
||||
MaxMessageKeysPerSession: ss.MaxMessageKeysPerSession,
|
||||
Step: ss.Step,
|
||||
KeysCount: ss.KeysCount,
|
||||
}
|
||||
state.RootCh.CK = doubleratchet.Key(ss.RootChCK)
|
||||
state.RootCh.Crypto = c
|
||||
state.SendCh.CK = doubleratchet.Key(ss.SendChCK)
|
||||
state.SendCh.N = ss.SendChN
|
||||
state.SendCh.Crypto = c
|
||||
state.RecvCh.CK = doubleratchet.Key(ss.RecvChCK)
|
||||
state.RecvCh.N = ss.RecvChN
|
||||
state.RecvCh.Crypto = c
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// GetDRSession returns an active DR session for the peer, creating one if needed.
|
||||
func (p *Peer) GetDRSession() (doubleratchet.Session, error) {
|
||||
store := &drSessionStorage{peer: p}
|
||||
|
||||
// If we already have a saved state, load it
|
||||
if p.DrStateJson != "" {
|
||||
return doubleratchet.Load([]byte(p.Uid), store)
|
||||
}
|
||||
|
||||
// Initiator: has own DH keypair + root key, no state yet
|
||||
if p.DrInitiator && p.DrKpPrivate != "" {
|
||||
privBytes, err := base64.StdEncoding.DecodeString(p.DrKpPrivate)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetDRSession: decode DrKpPrivate: %w", err)
|
||||
}
|
||||
pubBytes, err := base64.StdEncoding.DecodeString(p.DrKpPublic)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetDRSession: decode DrKpPublic: %w", err)
|
||||
}
|
||||
rootKeyBytes, err := base64.StdEncoding.DecodeString(p.DrRootKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetDRSession: decode DrRootKey: %w", err)
|
||||
}
|
||||
kp := drLocalPair{priv: doubleratchet.Key(privBytes), pub: doubleratchet.Key(pubBytes)}
|
||||
return doubleratchet.New([]byte(p.Uid), doubleratchet.Key(rootKeyBytes), kp, store)
|
||||
}
|
||||
|
||||
// Responder: has remote DH public key + root key
|
||||
if !p.DrInitiator && p.ContactDrPublicKey != "" {
|
||||
remotePubBytes, err := base64.StdEncoding.DecodeString(p.ContactDrPublicKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetDRSession: decode ContactDrPublicKey: %w", err)
|
||||
}
|
||||
rootKeyBytes, err := base64.StdEncoding.DecodeString(p.DrRootKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetDRSession: decode DrRootKey: %w", err)
|
||||
}
|
||||
return doubleratchet.NewWithRemoteKey([]byte(p.Uid), doubleratchet.Key(rootKeyBytes), doubleratchet.Key(remotePubBytes), store)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("GetDRSession: peer %s has no DR keys configured", p.Uid)
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package helpers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -142,11 +143,21 @@ func ConsumeInboxFile(messageFilename string) ([]string, []string, string, error
|
||||
return nil, nil, "ReadMessage: GetFromMyLookupKey", errors.New("no visible peer for that message")
|
||||
}
|
||||
// Unpack the message
|
||||
usermsg, err := peer.ProcessInboundUserMessage(packedUserMessage.Payload, packedUserMessage.Signature)
|
||||
usermsg, err := peer.ProcessInboundUserMessage(packedUserMessage)
|
||||
if err != nil {
|
||||
return nil, nil, "ReadMessage: ProcessInboundUserMessage", err
|
||||
}
|
||||
|
||||
// Check for received or processed already filled => it's an ack for one of our sent messages
|
||||
if len(usermsg.Data) == 0 && usermsg.Status != nil && usermsg.Status.Uuid != "" &&
|
||||
(usermsg.Status.Received != 0 || usermsg.Status.Processed != 0) {
|
||||
password, _ := client.GetConfig().GetMemPass()
|
||||
if ackErr := client.UpdateMessageAck(peer, usermsg.Status.Uuid, usermsg.Status.Received, usermsg.Status.Processed, password); ackErr != nil {
|
||||
logger.Warn().Err(ackErr).Str("uuid", usermsg.Status.Uuid).Msg("ConsumeInboxFile: UpdateMessageAck")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
//fmt.Println("From:", usermsg.From)
|
||||
//jsonUserMessage, _ := json.Marshal(usermsg)
|
||||
//fmt.Println(string(jsonUserMessage))
|
||||
@@ -172,6 +183,14 @@ func ConsumeInboxFile(messageFilename string) ([]string, []string, string, error
|
||||
// user message
|
||||
|
||||
messagesOverview = append(messagesOverview, peer.Name+" > "+string(usermsg.Data))
|
||||
|
||||
// stamp the received time before storing
|
||||
receivedAt := time.Now().UTC().Unix()
|
||||
if usermsg.Status == nil {
|
||||
usermsg.Status = &meowlib.ConversationStatus{}
|
||||
}
|
||||
usermsg.Status.Received = uint64(receivedAt)
|
||||
|
||||
// add message to storage
|
||||
err = peer.StoreMessage(usermsg, filenames)
|
||||
if err != nil {
|
||||
@@ -179,6 +198,20 @@ func ConsumeInboxFile(messageFilename string) ([]string, []string, string, error
|
||||
}
|
||||
filenames = []string{}
|
||||
|
||||
// Persist peer to save updated DR state (DrStateJson)
|
||||
if peer.DrRootKey != "" {
|
||||
if storeErr := identity.Peers.StorePeer(peer); storeErr != nil {
|
||||
logger.Warn().Err(storeErr).Str("peer", peer.Uid).Msg("ConsumeInboxFile: StorePeer (DR state)")
|
||||
}
|
||||
}
|
||||
|
||||
// Send delivery ack if the peer requested it
|
||||
if peer.SendDeliveryAck && usermsg.Status.Uuid != "" {
|
||||
storagePath := filepath.Join(client.GetConfig().StoragePath, identity.Uuid)
|
||||
if ackErr := sendDeliveryAck(storagePath, peer, usermsg.Status.Uuid, receivedAt); ackErr != nil {
|
||||
logger.Warn().Err(ackErr).Str("peer", peer.Uid).Msg("ConsumeInboxFile: sendDeliveryAck")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,3 +273,45 @@ func LongPollAllServerJobs(storage_path string, jobs []client.RequestsJob, timeo
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// sendDeliveryAck builds a delivery acknowledgment for messageUuid and enqueues
|
||||
// it for sending to the peer's contact pull servers.
|
||||
func sendDeliveryAck(storagePath string, peer *client.Peer, messageUuid string, receivedAt int64) error {
|
||||
packedMsg, _, err := BuildReceivedMessage(messageUuid, peer.Uid, receivedAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sendDeliveryAck: BuildReceivedMessage: %w", err)
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(packedMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sendDeliveryAck: proto.Marshal: %w", err)
|
||||
}
|
||||
|
||||
outboxDir := filepath.Join(storagePath, "outbox")
|
||||
if err := os.MkdirAll(outboxDir, 0700); err != nil {
|
||||
return fmt.Errorf("sendDeliveryAck: MkdirAll: %w", err)
|
||||
}
|
||||
|
||||
outboxFile := filepath.Join(outboxDir, "ack_"+uuid.New().String())
|
||||
if err := os.WriteFile(outboxFile, data, 0600); err != nil {
|
||||
return fmt.Errorf("sendDeliveryAck: WriteFile: %w", err)
|
||||
}
|
||||
|
||||
var servers []client.Server
|
||||
for _, srvUid := range peer.ContactPullServers {
|
||||
srv, loadErr := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvUid)
|
||||
if loadErr == nil && srv != nil {
|
||||
servers = append(servers, *srv)
|
||||
}
|
||||
}
|
||||
if len(servers) == 0 {
|
||||
os.Remove(outboxFile)
|
||||
return errors.New("sendDeliveryAck: no contact servers found")
|
||||
}
|
||||
|
||||
return client.PushSendJob(storagePath, &client.SendJob{
|
||||
Queue: peer.Uid,
|
||||
File: outboxFile,
|
||||
Servers: servers,
|
||||
})
|
||||
}
|
||||
190
client/helpers/bgSendHelper.go
Normal file
190
client/helpers/bgSendHelper.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
"forge.redroom.link/yves/meowlib/client"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const maxRetriesPerServer = 3
|
||||
const defaultSendTimeout = 3600 * 24 // seconds, used when job.Timeout is 0
|
||||
const defaultPostTimeout = 200
|
||||
|
||||
// CreateUserMessageAndSendJob is the single entry point for sending a message.
|
||||
// It creates and stores the user message, serialises the packed form to
|
||||
// storagePath/outbox/{dbFile}_{dbId}, and enqueues a SendJob in
|
||||
// storagePath/queues/{peerUid}.
|
||||
func CreateUserMessageAndSendJob(storagePath, message, peerUid, replyToUid string, filelist []string, servers []client.Server, timeout int) error {
|
||||
packedMsg, dbFile, dbId, errTxt, err := CreateAndStoreUserMessage(message, peerUid, replyToUid, filelist)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", errTxt, err)
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(packedMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("CreateUserMessageAndSendJob: proto.Marshal: %w", err)
|
||||
}
|
||||
|
||||
outboxDir := filepath.Join(storagePath, "outbox")
|
||||
if err := os.MkdirAll(outboxDir, 0700); err != nil {
|
||||
return fmt.Errorf("CreateUserMessageAndSendJob: MkdirAll: %w", err)
|
||||
}
|
||||
|
||||
outboxFile := filepath.Join(outboxDir, fmt.Sprintf("%s_%d", dbFile, dbId))
|
||||
if err := os.WriteFile(outboxFile, data, 0600); err != nil {
|
||||
return fmt.Errorf("CreateUserMessageAndSendJob: WriteFile: %w", err)
|
||||
}
|
||||
|
||||
return client.PushSendJob(storagePath, &client.SendJob{
|
||||
Queue: peerUid,
|
||||
File: outboxFile,
|
||||
Servers: servers,
|
||||
Timeout: timeout,
|
||||
})
|
||||
}
|
||||
|
||||
// ProcessSendQueues discovers every queue DB file under storagePath/queues/
|
||||
// and processes each queue concurrently in its own goroutine.
|
||||
// Call this from the send isolate on wake-up notification or on a periodic timer.
|
||||
// It returns the total number of successfully sent messages across all queues.
|
||||
func ProcessSendQueues(storagePath string) int {
|
||||
queueDir := filepath.Join(storagePath, "queues")
|
||||
entries, err := os.ReadDir(queueDir)
|
||||
if err != nil {
|
||||
logger.Warn().Err(err).Str("dir", queueDir).Msg("ProcessSendQueues: ReadDir")
|
||||
return 0
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
counts := make(chan int, len(entries))
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
queue := entry.Name()
|
||||
go func(q string) {
|
||||
defer wg.Done()
|
||||
counts <- processSendQueue(storagePath, q)
|
||||
}(queue)
|
||||
}
|
||||
wg.Wait()
|
||||
close(counts)
|
||||
|
||||
total := 0
|
||||
for n := range counts {
|
||||
total += n
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// processSendQueue processes pending jobs for a single named queue sequentially.
|
||||
// It returns the number of successfully sent messages.
|
||||
//
|
||||
// For each pending job it will:
|
||||
// - immediately mark it failed if its TTL (job.Timeout) has elapsed – this is the
|
||||
// only criterion for permanent failure; retry exhaustion is never a failure cause
|
||||
// - attempt delivery, cycling through servers until one succeeds
|
||||
// - mark it sent on success
|
||||
// - stop and return when all servers fail this run (will resume on next call)
|
||||
//
|
||||
// Per-server retry counts (maxRetriesPerServer) are local to each call so that
|
||||
// past failures in previous runs never prevent future delivery attempts.
|
||||
func processSendQueue(storagePath, queue string) int {
|
||||
sent := 0
|
||||
for {
|
||||
job, _, err := client.PeekSendJob(storagePath, queue)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Str("queue", queue).Msg("processSendQueue: PeekSendJob")
|
||||
return sent
|
||||
}
|
||||
if job == nil {
|
||||
return sent // no more pending jobs
|
||||
}
|
||||
|
||||
// Hard timeout: the only criterion for permanent failure.
|
||||
// Use defaultSendTimeout when the job carries no explicit TTL.
|
||||
ttl := job.Timeout
|
||||
if ttl <= 0 {
|
||||
ttl = defaultSendTimeout
|
||||
}
|
||||
if time.Since(job.InsertedAt) > time.Duration(ttl)*time.Second {
|
||||
job.Status = client.SendStatusFailed
|
||||
if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
|
||||
logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob timeout")
|
||||
}
|
||||
continue // try the next pending job
|
||||
}
|
||||
|
||||
// runRetries is allocated fresh every call so it never accumulates
|
||||
// across processSendQueue invocations.
|
||||
runRetries := make([]int, len(job.Servers))
|
||||
serverIdx, sendErr := attemptSendJob(job, runRetries)
|
||||
if sendErr == nil {
|
||||
now := time.Now().UTC()
|
||||
job.Status = client.SendStatusSent
|
||||
job.SentAt = &now
|
||||
job.SuccessfulServer = &serverIdx
|
||||
if err := client.UpdateSendJob(storagePath, queue, job); err != nil {
|
||||
logger.Error().Err(err).Int64("id", job.ID).Msg("processSendQueue: UpdateSendJob sent")
|
||||
}
|
||||
sent++
|
||||
continue // job delivered – look for the next one
|
||||
}
|
||||
|
||||
// All servers failed this run; stop and wait for the next poll.
|
||||
// Permanent failure is decided solely by the TTL check above.
|
||||
return sent
|
||||
}
|
||||
}
|
||||
|
||||
// attemptSendJob reads the pre-built packed message from job.File and tries
|
||||
// each server in order, skipping any server that has already reached
|
||||
// maxRetriesPerServer failures within the current run.
|
||||
// On the first successful POST it returns the server index.
|
||||
// Retry counts are tracked in the caller-supplied retries slice (run-local,
|
||||
// never persisted) so that previous runs do not influence this attempt.
|
||||
func attemptSendJob(job *client.SendJob, retries []int) (int, error) {
|
||||
data, err := os.ReadFile(job.File)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
// Ensure the retries slice is aligned with the servers slice.
|
||||
for len(retries) < len(job.Servers) {
|
||||
retries = append(retries, 0)
|
||||
}
|
||||
|
||||
for i, srv := range job.Servers {
|
||||
if retries[i] >= maxRetriesPerServer {
|
||||
continue // this server is exhausted for the current run
|
||||
}
|
||||
|
||||
// Unmarshal the stored PackedUserMessage and wrap it for this server.
|
||||
packedUsrMsg := &meowlib.PackedUserMessage{}
|
||||
if err := proto.Unmarshal(data, packedUsrMsg); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
serverData, errTxt, packErr := PackMessageForServer(packedUsrMsg, srv.GetUid())
|
||||
if packErr != nil {
|
||||
logger.Error().Err(packErr).Str("errTxt", errTxt).Str("url", srv.Url).Msg("attemptSendJob: PackMessageForServer")
|
||||
retries[i]++
|
||||
continue
|
||||
}
|
||||
|
||||
_, err = meowlib.HttpPostMessage(srv.Url, serverData, defaultPostTimeout)
|
||||
if err != nil {
|
||||
logger.Warn().Err(err).Str("url", srv.Url).Int("retry", retries[i]+1).Msg("attemptSendJob: POST failed")
|
||||
retries[i]++
|
||||
continue
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
return -1, errors.New("all servers failed or exhausted")
|
||||
}
|
||||
389
client/helpers/bgSendHelper_test.go
Normal file
389
client/helpers/bgSendHelper_test.go
Normal file
@@ -0,0 +1,389 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
"forge.redroom.link/yves/meowlib/client"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// --- test helpers -------------------------------------------------------
|
||||
|
||||
// acceptServer starts an httptest server that counts received POST /msg requests.
|
||||
func acceptServer(t *testing.T, received *int64) *httptest.Server {
|
||||
t.Helper()
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
atomic.AddInt64(received, 1)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
}
|
||||
|
||||
// closedServerURL starts and immediately closes an httptest server so its URL
|
||||
// causes "connection refused" without any wait.
|
||||
func closedServerURL(t *testing.T) string {
|
||||
t.Helper()
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
|
||||
srv.Close()
|
||||
return srv.URL
|
||||
}
|
||||
|
||||
// writeMsgFile writes a valid serialised empty PackedUserMessage to a temp file
|
||||
// and returns the path. The file content satisfies proto.Unmarshal inside
|
||||
// attemptSendJob; the httptest endpoints ignore the encrypted payload.
|
||||
func writeMsgFile(t *testing.T, dir, name string) string {
|
||||
t.Helper()
|
||||
p := filepath.Join(dir, name)
|
||||
data, err := proto.Marshal(&meowlib.PackedUserMessage{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.WriteFile(p, data, 0600))
|
||||
return p
|
||||
}
|
||||
|
||||
// newTestServer creates a client.Server for the given URL, generates a
|
||||
// throwaway keypair so that AsymEncryptMessage succeeds, and stores the server
|
||||
// in the current identity's MessageServers so that PackMessageForServer can
|
||||
// look it up via LoadServer. Returns the registered server.
|
||||
//
|
||||
// Call setupMsgHelperConfig before this so an identity is in place.
|
||||
func newTestServer(t *testing.T, url string) client.Server {
|
||||
t.Helper()
|
||||
srv, err := client.CreateServerFromUrl(url)
|
||||
require.NoError(t, err)
|
||||
kp, err := meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
srv.PublicKey = kp.Public
|
||||
require.NoError(t, client.GetConfig().GetIdentity().MessageServers.StoreServer(srv))
|
||||
return *srv
|
||||
}
|
||||
|
||||
// pushJob is a convenience wrapper around client.PushSendJob.
|
||||
func pushJob(t *testing.T, dir, queue, file string, servers []client.Server, timeout int) {
|
||||
t.Helper()
|
||||
require.NoError(t, client.PushSendJob(dir, &client.SendJob{
|
||||
Queue: queue,
|
||||
File: file,
|
||||
Servers: servers,
|
||||
Timeout: timeout,
|
||||
}))
|
||||
}
|
||||
|
||||
// serverSlice builds a []client.Server from plain URLs.
|
||||
func serverSlice(urls ...string) []client.Server {
|
||||
out := make([]client.Server, len(urls))
|
||||
for i, u := range urls {
|
||||
out[i] = client.Server{Url: u}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// --- unit tests ---------------------------------------------------------
|
||||
|
||||
// TestAttemptSendJob_Success verifies a successful POST to the first server.
|
||||
func TestAttemptSendJob_Success(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
srv := acceptServer(t, &received)
|
||||
defer srv.Close()
|
||||
|
||||
newTestServer(t, srv.URL)
|
||||
|
||||
job := &client.SendJob{
|
||||
File: writeMsgFile(t, dir, "msg"),
|
||||
Servers: serverSlice(srv.URL),
|
||||
Timeout: 5,
|
||||
}
|
||||
retries := make([]int, len(job.Servers))
|
||||
|
||||
idx, err := attemptSendJob(job, retries)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, idx)
|
||||
assert.Equal(t, int64(1), atomic.LoadInt64(&received))
|
||||
}
|
||||
|
||||
// TestAttemptSendJob_Fallback verifies that when the first server refuses the
|
||||
// connection, the second server is tried and succeeds.
|
||||
func TestAttemptSendJob_Fallback(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
good := acceptServer(t, &received)
|
||||
defer good.Close()
|
||||
|
||||
deadURL := closedServerURL(t)
|
||||
newTestServer(t, deadURL)
|
||||
newTestServer(t, good.URL)
|
||||
|
||||
job := &client.SendJob{
|
||||
File: writeMsgFile(t, dir, "msg"),
|
||||
Servers: serverSlice(deadURL, good.URL),
|
||||
Timeout: 5,
|
||||
}
|
||||
retries := make([]int, len(job.Servers))
|
||||
|
||||
idx, err := attemptSendJob(job, retries)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, idx, "second server should have been used")
|
||||
assert.Equal(t, int64(1), atomic.LoadInt64(&received))
|
||||
assert.Equal(t, 1, retries[0], "first server retry should be incremented")
|
||||
assert.Equal(t, 0, retries[1], "second server retry must stay at zero")
|
||||
}
|
||||
|
||||
// TestAttemptSendJob_AllFail verifies that all retry counts are incremented
|
||||
// and an error is returned when every server refuses connections.
|
||||
func TestAttemptSendJob_AllFail(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
dead1 := closedServerURL(t)
|
||||
dead2 := closedServerURL(t)
|
||||
newTestServer(t, dead1)
|
||||
newTestServer(t, dead2)
|
||||
|
||||
job := &client.SendJob{
|
||||
File: writeMsgFile(t, dir, "msg"),
|
||||
Servers: serverSlice(dead1, dead2),
|
||||
Timeout: 5,
|
||||
}
|
||||
retries := make([]int, len(job.Servers))
|
||||
|
||||
_, err := attemptSendJob(job, retries)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, 1, retries[0])
|
||||
assert.Equal(t, 1, retries[1])
|
||||
}
|
||||
|
||||
// TestAttemptSendJob_SkipsExhaustedServer verifies that a server already at
|
||||
// maxRetriesPerServer is not contacted.
|
||||
func TestAttemptSendJob_SkipsExhaustedServer(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
good := acceptServer(t, &received)
|
||||
defer good.Close()
|
||||
|
||||
deadURL := closedServerURL(t)
|
||||
newTestServer(t, good.URL) // only good server needs to be reachable
|
||||
|
||||
job := &client.SendJob{
|
||||
File: writeMsgFile(t, dir, "msg"),
|
||||
Servers: serverSlice(
|
||||
deadURL, // exhausted – must be skipped (no need to store in identity)
|
||||
good.URL,
|
||||
),
|
||||
Timeout: 5,
|
||||
}
|
||||
retries := []int{maxRetriesPerServer, 0} // first server already exhausted this run
|
||||
|
||||
idx, err := attemptSendJob(job, retries)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, idx)
|
||||
assert.Equal(t, int64(1), atomic.LoadInt64(&received))
|
||||
}
|
||||
|
||||
// --- integration tests --------------------------------------------------
|
||||
|
||||
// TestCreateUserMessageAndSendJob verifies that the packed message is written to
|
||||
// outbox/{dbFile}_{dbId} and a pending send job is enqueued for the peer.
|
||||
func TestCreateUserMessageAndSendJob(t *testing.T) {
|
||||
dir, id := setupMsgHelperConfig(t)
|
||||
|
||||
peer := newFullyKeyedPeer(t, "peer-create-send")
|
||||
require.NoError(t, id.Peers.StorePeer(peer))
|
||||
|
||||
srv := newTestServer(t, "http://test-srv.example")
|
||||
|
||||
err := CreateUserMessageAndSendJob(
|
||||
dir,
|
||||
"hello from integration",
|
||||
"peer-create-send",
|
||||
"",
|
||||
nil,
|
||||
[]client.Server{srv},
|
||||
60,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// A pending job must be in the queue.
|
||||
job, _, err := client.PeekSendJob(dir, "peer-create-send")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job, "a send job must be enqueued")
|
||||
|
||||
// The outbox file must exist under storagePath/outbox/.
|
||||
assert.FileExists(t, job.File)
|
||||
assert.True(t, strings.HasPrefix(job.File, filepath.Join(dir, "outbox")),
|
||||
"outbox file must be under storagePath/outbox/")
|
||||
|
||||
// The basename must follow the {dbFile}_{dbId} naming convention.
|
||||
base := filepath.Base(job.File)
|
||||
sep := strings.LastIndex(base, "_")
|
||||
require.Greater(t, sep, 0, "filename must contain an underscore separating dbFile from dbId")
|
||||
dbId, parseErr := strconv.ParseInt(base[sep+1:], 10, 64)
|
||||
assert.NoError(t, parseErr, "suffix after underscore must be a numeric db ID")
|
||||
assert.Greater(t, dbId, int64(0), "db ID must be positive")
|
||||
}
|
||||
|
||||
// TestProcessSendQueues_Success verifies that a pending job is delivered and
|
||||
// marked as sent when the server accepts it.
|
||||
func TestProcessSendQueues_Success(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
srv := acceptServer(t, &received)
|
||||
defer srv.Close()
|
||||
|
||||
newTestServer(t, srv.URL)
|
||||
|
||||
msgPath := writeMsgFile(t, dir, "msg")
|
||||
pushJob(t, dir, "q1", msgPath, serverSlice(srv.URL), 10)
|
||||
|
||||
// grab the ID before processing so we can inspect the row afterward
|
||||
_, id, err := client.PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
|
||||
ProcessSendQueues(dir)
|
||||
|
||||
assert.Equal(t, int64(1), atomic.LoadInt64(&received), "server should have received exactly one message")
|
||||
|
||||
job, err := client.GetSendJob(dir, "q1", id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
assert.Equal(t, client.SendStatusSent, job.Status)
|
||||
assert.NotNil(t, job.SentAt)
|
||||
require.NotNil(t, job.SuccessfulServer)
|
||||
assert.Equal(t, 0, *job.SuccessfulServer)
|
||||
}
|
||||
|
||||
// TestProcessSendQueues_ServerFallback verifies that when the first server is
|
||||
// unreachable, the second server is tried successfully in the same pass.
|
||||
func TestProcessSendQueues_ServerFallback(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
good := acceptServer(t, &received)
|
||||
defer good.Close()
|
||||
|
||||
deadURL := closedServerURL(t)
|
||||
newTestServer(t, deadURL)
|
||||
newTestServer(t, good.URL)
|
||||
|
||||
msgPath := writeMsgFile(t, dir, "msg")
|
||||
pushJob(t, dir, "q1", msgPath, serverSlice(deadURL, good.URL), 10)
|
||||
|
||||
_, id, err := client.PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
|
||||
ProcessSendQueues(dir)
|
||||
|
||||
assert.Equal(t, int64(1), atomic.LoadInt64(&received))
|
||||
|
||||
job, err := client.GetSendJob(dir, "q1", id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
assert.Equal(t, client.SendStatusSent, job.Status)
|
||||
require.NotNil(t, job.SuccessfulServer)
|
||||
assert.Equal(t, 1, *job.SuccessfulServer, "second server should be recorded as successful")
|
||||
}
|
||||
|
||||
// TestProcessSendQueues_FailedRunsStayPending verifies that repeated delivery
|
||||
// failures do NOT mark a job as permanently failed. Only a TTL timeout can do
|
||||
// that; retry exhaustion merely stops the current run.
|
||||
func TestProcessSendQueues_FailedRunsStayPending(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
deadURL := closedServerURL(t)
|
||||
newTestServer(t, deadURL)
|
||||
|
||||
msgPath := writeMsgFile(t, dir, "msg")
|
||||
// timeout=0 → uses defaultSendTimeout (24 h), so the job won't expire here.
|
||||
pushJob(t, dir, "q1", msgPath, serverSlice(deadURL), 0)
|
||||
|
||||
_, id, err := client.PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run several times – per-server retry counts reset each run, so the job
|
||||
// must remain pending no matter how many runs fail.
|
||||
for i := 0; i < maxRetriesPerServer+2; i++ {
|
||||
ProcessSendQueues(dir)
|
||||
}
|
||||
|
||||
job, err := client.GetSendJob(dir, "q1", id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
assert.Equal(t, client.SendStatusPending, job.Status, "repeated failures must not cause permanent failure – only timeout does")
|
||||
}
|
||||
|
||||
// TestProcessSendQueues_JobTimeout verifies that a job whose timeout has elapsed
|
||||
// is immediately marked as failed without any send attempt.
|
||||
func TestProcessSendQueues_JobTimeout(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
srv := acceptServer(t, &received)
|
||||
defer srv.Close()
|
||||
|
||||
newTestServer(t, srv.URL)
|
||||
|
||||
msgPath := writeMsgFile(t, dir, "msg")
|
||||
// Timeout of 1 second; we will backdate inserted_at so the job looks expired.
|
||||
pushJob(t, dir, "q1", msgPath, serverSlice(srv.URL), 1)
|
||||
|
||||
_, id, err := client.PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Backdate inserted_at by 60 seconds directly in the DB.
|
||||
dbPath := filepath.Join(dir, "queues", "q1")
|
||||
backdateJob(t, dbPath, id, -60*time.Second)
|
||||
|
||||
ProcessSendQueues(dir)
|
||||
|
||||
assert.Equal(t, int64(0), atomic.LoadInt64(&received), "no send should be attempted for an expired job")
|
||||
|
||||
job, err := client.GetSendJob(dir, "q1", id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
assert.Equal(t, client.SendStatusFailed, job.Status)
|
||||
}
|
||||
|
||||
// TestProcessSendQueues_MultipleQueues verifies that jobs in different queue
|
||||
// files are processed concurrently and independently.
|
||||
func TestProcessSendQueues_MultipleQueues(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
var received int64
|
||||
srv := acceptServer(t, &received)
|
||||
defer srv.Close()
|
||||
|
||||
newTestServer(t, srv.URL)
|
||||
|
||||
for _, q := range []string{"qa", "qb", "qc"} {
|
||||
msgPath := writeMsgFile(t, dir, "msg_"+q)
|
||||
pushJob(t, dir, q, msgPath, serverSlice(srv.URL), 10)
|
||||
}
|
||||
|
||||
// Concurrent goroutines for each queue all try to open the same BadgerDB for
|
||||
// server lookup; only one can hold the lock at a time. Jobs that lose the
|
||||
// race stay pending and are retried on the next call. Three passes guarantee
|
||||
// every queue gets at least one uncontested turn.
|
||||
for i := 0; i < 3; i++ {
|
||||
ProcessSendQueues(dir)
|
||||
}
|
||||
|
||||
assert.Equal(t, int64(3), atomic.LoadInt64(&received), "all three queues should have delivered their message")
|
||||
}
|
||||
|
||||
// backdateJob opens the SQLite file directly and shifts inserted_at by delta.
|
||||
// This lets tests simulate elapsed time without sleeping.
|
||||
func backdateJob(t *testing.T, dbPath string, id int64, delta time.Duration) {
|
||||
t.Helper()
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
newTs := time.Now().Add(delta).Unix()
|
||||
_, err = db.Exec("UPDATE queue SET inserted_at = ? WHERE id = ?", newTs, id)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -36,6 +36,7 @@ func ReadCallRequestResponseMessage(data []byte, srvuid string) (*meowlib.VideoD
|
||||
return serverMsg.VideoData, "", nil
|
||||
}
|
||||
|
||||
/*
|
||||
func BuildCallMessage(videodata *meowlib.VideoData, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) {
|
||||
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
|
||||
|
||||
@@ -53,3 +54,4 @@ func BuildCallMessage(videodata *meowlib.VideoData, srvuid string, peer_uid stri
|
||||
func BuildCancelCallMessage() {
|
||||
|
||||
}
|
||||
*/
|
||||
|
||||
@@ -28,7 +28,7 @@ func invitationGetAnswerReadResponse(invitation *meowlib.Invitation) (*client.Pe
|
||||
if peer != nil {
|
||||
|
||||
// process the packed user message
|
||||
usermsg, err := peer.ProcessInboundUserMessage(invitationAnswer.Payload, invitationAnswer.Signature)
|
||||
usermsg, err := peer.ProcessInboundUserMessage(&invitationAnswer)
|
||||
if err != nil {
|
||||
return nil, "InvitationGetAnswerReadResponse: ProcessInboundUserMessage", err
|
||||
}
|
||||
|
||||
@@ -1,86 +1,288 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
"forge.redroom.link/yves/meowlib/client"
|
||||
"github.com/google/uuid"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func messageBuildPackAndStore(msg *meowlib.UserMessage, srvuid string, peer *client.Peer) ([]byte, string, error) {
|
||||
func PackMessageForServer(packedMsg *meowlib.PackedUserMessage, srvuid string) ([]byte, string, error) {
|
||||
// Get the message server
|
||||
srv, err := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvuid)
|
||||
if err != nil {
|
||||
return nil, "messageBuildPostprocess : LoadServer", err
|
||||
return nil, "PackMessageForServer : LoadServer", err
|
||||
}
|
||||
// Prepare cyphered + packed user message
|
||||
packedMsg, err := peer.ProcessOutboundUserMessage(msg)
|
||||
if err != nil {
|
||||
return nil, "messageBuildPostprocess : ProcessOutboundUserMessage", err
|
||||
// Fetch and persist the server public key if it was never stored
|
||||
// (servers added via invitation finalization only have a UserKp, no PublicKey)
|
||||
if srv.PublicKey == "" {
|
||||
srvdata, err := meowlib.HttpGetId(srv.Url)
|
||||
if err != nil {
|
||||
return nil, "PackMessageForServer : HttpGetId", err
|
||||
}
|
||||
srv.PublicKey = srvdata["publicKey"]
|
||||
client.GetConfig().GetIdentity().MessageServers.StoreServer(srv)
|
||||
}
|
||||
// Creating Server message for transporting the user message
|
||||
toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg)
|
||||
data, err := srv.ProcessOutboundMessage(toServerMessage)
|
||||
if err != nil {
|
||||
return nil, "messageBuildPostprocess : ProcessOutboundMessage", err
|
||||
}
|
||||
// Store message
|
||||
err = peer.StoreMessage(msg, nil)
|
||||
if err != nil {
|
||||
return nil, "messageBuildPostprocess : StoreMessage", err
|
||||
return nil, "PackMessageForServer : ProcessOutboundMessage", err
|
||||
}
|
||||
return data, "", nil
|
||||
|
||||
}
|
||||
|
||||
func CreateUserMessage(message string, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) {
|
||||
func CreateStorePackUserMessageForServer(message string, srvuid string, peer_uid string, replyToUid string, filelist []string) ([]byte, string, error) {
|
||||
usermessage, _, _, errtxt, err := CreateAndStoreUserMessage(message, peer_uid, replyToUid, filelist)
|
||||
if err != nil {
|
||||
return nil, errtxt, err
|
||||
}
|
||||
return PackMessageForServer(usermessage, srvuid)
|
||||
}
|
||||
|
||||
// CreateAndStoreUserMessage creates, signs, and stores an outbound message for
|
||||
// peer_uid. It returns the packed (encrypted) form ready for server transport,
|
||||
// the peer DB file UUID (dbFile), the SQLite row ID (dbId), an error context
|
||||
// string, and any error.
|
||||
func CreateAndStoreUserMessage(message string, peer_uid string, replyToUid string, filelist []string) (*meowlib.PackedUserMessage, string, int64, string, error) {
|
||||
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
|
||||
|
||||
// Creating User message
|
||||
usermessage, err := peer.BuildSimpleUserMessage([]byte(message))
|
||||
if err != nil {
|
||||
return nil, "PrepareServerMessage : BuildSimpleUserMessage", err
|
||||
return nil, "", 0, "PrepareServerMessage : BuildSimpleUserMessage", err
|
||||
}
|
||||
for _, file := range filelist {
|
||||
err = usermessage.AddFile(file, client.GetConfig().Chunksize)
|
||||
if err != nil {
|
||||
return nil, "PrepareServerMessage : AddFile", err
|
||||
return nil, "", 0, "PrepareServerMessage : AddFile", err
|
||||
}
|
||||
}
|
||||
usermessage.Status.AnswerToUuid = replyToUid
|
||||
usermessage.Status.Sent = uint64(time.Now().UTC().Unix())
|
||||
usermessage.Status.ReplyToUuid = replyToUid
|
||||
|
||||
return messageBuildPackAndStore(usermessage, srvuid, peer)
|
||||
// Store message
|
||||
err = peer.StoreMessage(usermessage, nil)
|
||||
if err != nil {
|
||||
return nil, "", 0, "messageBuildPostprocess : StoreMessage", err
|
||||
}
|
||||
|
||||
dbFile := peer.LastMessage.Dbfile
|
||||
dbId := peer.LastMessage.Dbid
|
||||
|
||||
// Prepare cyphered + packed user message
|
||||
packedMsg, err := peer.ProcessOutboundUserMessage(usermessage)
|
||||
if err != nil {
|
||||
return nil, "", 0, "messageBuildPostprocess : ProcessOutboundUserMessage", err
|
||||
}
|
||||
|
||||
// Persist peer to save updated DR state (DrStateJson)
|
||||
if peer.DrRootKey != "" {
|
||||
if storeErr := client.GetConfig().GetIdentity().Peers.StorePeer(peer); storeErr != nil {
|
||||
logger.Warn().Err(storeErr).Str("peer", peer.Uid).Msg("messageBuildPostprocess: StorePeer (DR state)")
|
||||
}
|
||||
}
|
||||
|
||||
return packedMsg, dbFile, dbId, "", nil
|
||||
}
|
||||
|
||||
func BuildAckMessage(messageUid string, srvuid string, peer_uid string, received int64, processed int64) ([]byte, string, error) {
|
||||
|
||||
func BuildReceivedMessage(messageUid string, peer_uid string, received int64) (*meowlib.PackedUserMessage, string, error) {
|
||||
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
|
||||
srv, err := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvuid)
|
||||
if err != nil {
|
||||
return nil, "PrepareServerMessage : LoadServer", err
|
||||
}
|
||||
// Creating User message
|
||||
usermessage, err := peer.BuildSimpleUserMessage(nil)
|
||||
if err != nil {
|
||||
return nil, "PrepareServerMessage : BuildSimpleUserMessage", err
|
||||
return nil, "BuildReceivedMessage : BuildSimpleUserMessage", err
|
||||
}
|
||||
usermessage.Status.Uuid = messageUid
|
||||
usermessage.Status.Received = uint64(received)
|
||||
// Prepare cyphered + packed user message
|
||||
packedMsg, err := peer.ProcessOutboundUserMessage(usermessage)
|
||||
if err != nil {
|
||||
return nil, "BuildReceivedMessage : ProcessOutboundUserMessage", err
|
||||
}
|
||||
// Persist peer to save updated DR state (DrStateJson)
|
||||
if peer.DrRootKey != "" {
|
||||
client.GetConfig().GetIdentity().Peers.StorePeer(peer)
|
||||
}
|
||||
return packedMsg, "", nil
|
||||
}
|
||||
|
||||
func BuildProcessedMessage(messageUid string, peer_uid string, processed int64) (*meowlib.PackedUserMessage, string, error) {
|
||||
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peer_uid)
|
||||
// Creating User message
|
||||
usermessage, err := peer.BuildSimpleUserMessage(nil)
|
||||
if err != nil {
|
||||
return nil, "BuildProcessedMessage : BuildSimpleUserMessage", err
|
||||
}
|
||||
usermessage.Status.Uuid = messageUid
|
||||
usermessage.Status.Processed = uint64(processed)
|
||||
// Prepare cyphered + packed user message
|
||||
packedMsg, err := peer.ProcessOutboundUserMessage(usermessage)
|
||||
if err != nil {
|
||||
return nil, "PrepareServerMessage : ProcessOutboundUserMessage", err
|
||||
return nil, "BuildProcessedMessage : ProcessOutboundUserMessage", err
|
||||
}
|
||||
// Creating Server message for transporting the user message
|
||||
toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg)
|
||||
data, err := srv.ProcessOutboundMessage(toServerMessage)
|
||||
if err != nil {
|
||||
return nil, "PrepareServerMessage : ProcessOutboundMessage", err
|
||||
// Persist peer to save updated DR state (DrStateJson)
|
||||
if peer.DrRootKey != "" {
|
||||
client.GetConfig().GetIdentity().Peers.StorePeer(peer)
|
||||
}
|
||||
|
||||
return data, "", nil
|
||||
return packedMsg, "", nil
|
||||
}
|
||||
|
||||
func ReadAckMessageResponse() {
|
||||
//! update the status in message store
|
||||
}
|
||||
|
||||
// MarkMessageProcessed stamps the stored message with a processed timestamp of
|
||||
// now(), persists the updated record, and — if the peer has SendProcessingAck
|
||||
// enabled and the message carries a UUID — enqueues a processed acknowledgment
|
||||
// to the peer's contact pull servers.
|
||||
func MarkMessageProcessed(peerUid string, dbFile string, dbId int64) error {
|
||||
password, _ := client.GetConfig().GetMemPass()
|
||||
processedAt := time.Now().UTC().Unix()
|
||||
|
||||
dbm, err := client.GetDbMessage(dbFile, dbId, password)
|
||||
if err != nil {
|
||||
return fmt.Errorf("MarkMessageProcessed: GetDbMessage: %w", err)
|
||||
}
|
||||
if dbm.Status == nil {
|
||||
dbm.Status = &meowlib.ConversationStatus{}
|
||||
}
|
||||
dbm.Status.Processed = uint64(processedAt)
|
||||
if err := client.UpdateDbMessage(dbm, dbFile, dbId, password); err != nil {
|
||||
return fmt.Errorf("MarkMessageProcessed: UpdateDbMessage: %w", err)
|
||||
}
|
||||
|
||||
peer := client.GetConfig().GetIdentity().Peers.GetFromUid(peerUid)
|
||||
if peer == nil || !peer.SendProcessingAck || dbm.Status.Uuid == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
identity := client.GetConfig().GetIdentity()
|
||||
storagePath := filepath.Join(client.GetConfig().StoragePath, identity.Uuid)
|
||||
return sendProcessingAck(storagePath, peer, dbm.Status.Uuid, processedAt)
|
||||
}
|
||||
|
||||
// sendProcessingAck builds a processing acknowledgment for messageUuid and
|
||||
// enqueues it for sending to the peer's contact pull servers.
|
||||
func sendProcessingAck(storagePath string, peer *client.Peer, messageUuid string, processedAt int64) error {
|
||||
packedMsg, _, err := BuildProcessedMessage(messageUuid, peer.Uid, processedAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sendProcessingAck: BuildProcessedMessage: %w", err)
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(packedMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sendProcessingAck: proto.Marshal: %w", err)
|
||||
}
|
||||
|
||||
outboxDir := filepath.Join(storagePath, "outbox")
|
||||
if err := os.MkdirAll(outboxDir, 0700); err != nil {
|
||||
return fmt.Errorf("sendProcessingAck: MkdirAll: %w", err)
|
||||
}
|
||||
|
||||
outboxFile := filepath.Join(outboxDir, "ack_"+uuid.New().String())
|
||||
if err := os.WriteFile(outboxFile, data, 0600); err != nil {
|
||||
return fmt.Errorf("sendProcessingAck: WriteFile: %w", err)
|
||||
}
|
||||
|
||||
var servers []client.Server
|
||||
for _, srvUid := range peer.ContactPullServers {
|
||||
srv, loadErr := client.GetConfig().GetIdentity().MessageServers.LoadServer(srvUid)
|
||||
if loadErr == nil && srv != nil {
|
||||
servers = append(servers, *srv)
|
||||
}
|
||||
}
|
||||
if len(servers) == 0 {
|
||||
os.Remove(outboxFile)
|
||||
return errors.New("sendProcessingAck: no contact servers found")
|
||||
}
|
||||
|
||||
return client.PushSendJob(storagePath, &client.SendJob{
|
||||
Queue: peer.Uid,
|
||||
File: outboxFile,
|
||||
Servers: servers,
|
||||
})
|
||||
}
|
||||
|
||||
// ProcessSentMessages scans every send queue under storagePath/queues/, updates
|
||||
// the message storage entry with server delivery info for each sent job, then
|
||||
// removes the job from the queue. Returns the number of messages updated.
|
||||
//
|
||||
// The message DB location is recovered from the job's File basename, which must
|
||||
// follow the naming convention produced by CreateUserMessageAndSendJob:
|
||||
//
|
||||
// outbox/{dbFile}_{dbId}
|
||||
func ProcessSentMessages(storagePath string) int {
|
||||
password, _ := client.GetConfig().GetMemPass()
|
||||
queueDir := filepath.Join(storagePath, "queues")
|
||||
entries, err := os.ReadDir(queueDir)
|
||||
if err != nil {
|
||||
logger.Warn().Err(err).Str("dir", queueDir).Msg("ProcessSentMessages: ReadDir")
|
||||
return 0
|
||||
}
|
||||
|
||||
updated := 0
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
queue := entry.Name()
|
||||
|
||||
jobs, err := client.GetSentJobs(storagePath, queue)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Str("queue", queue).Msg("ProcessSentMessages: GetSentJobs")
|
||||
continue
|
||||
}
|
||||
|
||||
for _, job := range jobs {
|
||||
if job.SuccessfulServer == nil || job.SentAt == nil {
|
||||
// No delivery info – discard the job so it doesn't block the queue
|
||||
if err := client.DeleteSendJob(storagePath, queue, job.ID); err != nil {
|
||||
logger.Error().Err(err).Int64("id", job.ID).Msg("ProcessSentMessages: DeleteSendJob (incomplete)")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Recover dbFile and dbId from the outbox filename: {dbFile}_{dbId}
|
||||
base := filepath.Base(job.File)
|
||||
sep := strings.LastIndex(base, "_")
|
||||
if sep <= 0 {
|
||||
logger.Error().Int64("id", job.ID).Str("file", job.File).
|
||||
Msg("ProcessSentMessages: cannot parse dbFile/dbId from job filename — use CreateUserMessageAndSendJob to build jobs")
|
||||
continue
|
||||
}
|
||||
dbFile := base[:sep]
|
||||
dbId, parseErr := strconv.ParseInt(base[sep+1:], 10, 64)
|
||||
if parseErr != nil || dbFile == "" || dbId == 0 {
|
||||
logger.Error().Int64("id", job.ID).Str("file", job.File).
|
||||
Msg("ProcessSentMessages: invalid dbFile/dbId in job filename")
|
||||
continue
|
||||
}
|
||||
|
||||
serverUid := job.Servers[*job.SuccessfulServer].GetUid()
|
||||
receiveTime := uint64(job.SentAt.Unix())
|
||||
|
||||
if err := client.SetMessageServerDelivery(dbFile, dbId, serverUid, receiveTime, password); err != nil {
|
||||
logger.Error().Err(err).Str("queue", queue).
|
||||
Str("dbFile", dbFile).Int64("dbId", dbId).
|
||||
Msg("ProcessSentMessages: SetMessageServerDelivery")
|
||||
continue
|
||||
}
|
||||
if err := client.DeleteSendJob(storagePath, queue, job.ID); err != nil {
|
||||
logger.Error().Err(err).Int64("id", job.ID).Msg("ProcessSentMessages: DeleteSendJob")
|
||||
}
|
||||
updated++
|
||||
}
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
302
client/helpers/messageHelper_test.go
Normal file
302
client/helpers/messageHelper_test.go
Normal file
@@ -0,0 +1,302 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
"forge.redroom.link/yves/meowlib/client"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// setupMsgHelperConfig wires the global client.Config singleton to a fresh
|
||||
// temporary directory and returns it. Original values are restored in t.Cleanup.
|
||||
func setupMsgHelperConfig(t *testing.T) (dir string, id *client.Identity) {
|
||||
t.Helper()
|
||||
dir = t.TempDir()
|
||||
|
||||
cfg := client.GetConfig()
|
||||
origStorage := cfg.StoragePath
|
||||
origSuffix := cfg.DbSuffix
|
||||
origChunk := cfg.Chunksize
|
||||
|
||||
cfg.StoragePath = dir
|
||||
cfg.DbSuffix = ".sqlite"
|
||||
cfg.Chunksize = 1024 * 1024
|
||||
require.NoError(t, cfg.SetMemPass("testpassword"))
|
||||
|
||||
var err error
|
||||
id, err = client.CreateIdentity("testuser")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
cfg.StoragePath = origStorage
|
||||
cfg.DbSuffix = origSuffix
|
||||
cfg.Chunksize = origChunk
|
||||
})
|
||||
return dir, id
|
||||
}
|
||||
|
||||
// newFullyKeyedPeer returns a Peer with all three keypairs and contact keys set,
|
||||
// ready to store messages.
|
||||
func newFullyKeyedPeer(t *testing.T, uid string) *client.Peer {
|
||||
t.Helper()
|
||||
var err error
|
||||
peer := &client.Peer{
|
||||
Uid: uid,
|
||||
Name: "TestPeer-" + uid,
|
||||
}
|
||||
peer.MyIdentity, err = meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
peer.MyEncryptionKp, err = meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
peer.MyLookupKp, err = meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
|
||||
k, err := meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
peer.ContactPublicKey = k.Public
|
||||
|
||||
k, err = meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
peer.ContactEncryption = k.Public
|
||||
|
||||
k, err = meowlib.NewKeyPair()
|
||||
require.NoError(t, err)
|
||||
peer.ContactLookupKey = k.Public
|
||||
|
||||
return peer
|
||||
}
|
||||
|
||||
// storeTestMessage stores a single outbound message for peer.
|
||||
func storeTestMessage(t *testing.T, peer *client.Peer, text string) {
|
||||
t.Helper()
|
||||
um := &meowlib.UserMessage{
|
||||
Data: []byte(text),
|
||||
From: peer.MyIdentity.Public,
|
||||
Status: &meowlib.ConversationStatus{Uuid: "uuid-" + text},
|
||||
}
|
||||
require.NoError(t, peer.StoreMessage(um, nil))
|
||||
require.NotNil(t, peer.LastMessage, "StoreMessage must set LastMessage")
|
||||
}
|
||||
|
||||
// pushAndMarkSent pushes a send job for the given peer and marks it as delivered
|
||||
// by the given server. Returns the job after the status update.
|
||||
// The outbox file is named {dbFile}_{dbId} so that ProcessSentMessages can
|
||||
// recover the message DB location from the filename, matching the convention
|
||||
// used by CreateUserMessageAndSendJob.
|
||||
func pushAndMarkSent(t *testing.T, dir string, peer *client.Peer, srv client.Server) *client.SendJob {
|
||||
t.Helper()
|
||||
|
||||
dbFile := peer.LastMessage.Dbfile
|
||||
dbId := peer.LastMessage.Dbid
|
||||
|
||||
outboxDir := filepath.Join(dir, "outbox")
|
||||
require.NoError(t, os.MkdirAll(outboxDir, 0700))
|
||||
msgFile := filepath.Join(outboxDir, fmt.Sprintf("%s_%d", dbFile, dbId))
|
||||
require.NoError(t, os.WriteFile(msgFile, []byte("packed-server-message"), 0600))
|
||||
|
||||
require.NoError(t, client.PushSendJob(dir, &client.SendJob{
|
||||
Queue: peer.Uid,
|
||||
File: msgFile,
|
||||
Servers: []client.Server{srv},
|
||||
Timeout: 60,
|
||||
}))
|
||||
|
||||
job, _, err := client.PeekSendJob(dir, peer.Uid)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
|
||||
sentAt := time.Now()
|
||||
srvIdx := 0
|
||||
job.Status = client.SendStatusSent
|
||||
job.SentAt = &sentAt
|
||||
job.SuccessfulServer = &srvIdx
|
||||
require.NoError(t, client.UpdateSendJob(dir, peer.Uid, job))
|
||||
|
||||
return job
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestProcessSentMessages_UpdatesDeliveryInfo is the main round-trip test.
|
||||
// It verifies that after ProcessSentMessages runs:
|
||||
// - the function returns 1 (one message updated)
|
||||
// - the send job is removed from the queue
|
||||
// - a subsequent LoadMessagesHistory returns ServerDeliveryUuid and
|
||||
// ServerDeliveryTimestamp for the message
|
||||
func TestProcessSentMessages_UpdatesDeliveryInfo(t *testing.T) {
|
||||
dir, id := setupMsgHelperConfig(t)
|
||||
|
||||
peer := newFullyKeyedPeer(t, "peer-uid-main")
|
||||
require.NoError(t, id.Peers.StorePeer(peer))
|
||||
|
||||
storeTestMessage(t, peer, "hello world")
|
||||
|
||||
srv := client.Server{Url: "http://test-server.example"}
|
||||
job := pushAndMarkSent(t, dir, peer, srv)
|
||||
|
||||
// --- call under test ---
|
||||
updated := ProcessSentMessages(dir)
|
||||
|
||||
assert.Equal(t, 1, updated, "exactly one message should be updated")
|
||||
|
||||
// The job must be removed from the queue after processing.
|
||||
jobAfter, err := client.GetSendJob(dir, peer.Uid, job.ID)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, jobAfter, "job should be deleted after processing")
|
||||
|
||||
// Reload message history and verify delivery metadata was persisted.
|
||||
msgs, err := peer.LoadMessagesHistory(0, 0, 50)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, msgs, 1, "expected exactly one message in history")
|
||||
|
||||
assert.Equal(t, srv.GetUid(), msgs[0].ServerDeliveryUuid,
|
||||
"ServerDeliveryUuid should match the server that accepted the message")
|
||||
assert.NotZero(t, msgs[0].ServerDeliveryTimestamp,
|
||||
"ServerDeliveryTimestamp should be set after ProcessSentMessages")
|
||||
assert.Equal(t, uint64(job.SentAt.Unix()), msgs[0].ServerDeliveryTimestamp,
|
||||
"ServerDeliveryTimestamp should match job.SentAt")
|
||||
}
|
||||
|
||||
// TestProcessSentMessages_SkipsJobWithoutDeliveryInfo verifies that a Sent job
|
||||
// missing SentAt or SuccessfulServer is discarded (not counted, not updating
|
||||
// the message DB).
|
||||
func TestProcessSentMessages_SkipsJobWithoutDeliveryInfo(t *testing.T) {
|
||||
dir, id := setupMsgHelperConfig(t)
|
||||
|
||||
peer := newFullyKeyedPeer(t, "peer-uid-incomplete")
|
||||
require.NoError(t, id.Peers.StorePeer(peer))
|
||||
|
||||
storeTestMessage(t, peer, "incomplete job")
|
||||
|
||||
dbFile := peer.LastMessage.Dbfile
|
||||
dbId := peer.LastMessage.Dbid
|
||||
|
||||
outboxDir := filepath.Join(dir, "outbox")
|
||||
require.NoError(t, os.MkdirAll(outboxDir, 0700))
|
||||
msgFile := filepath.Join(outboxDir, fmt.Sprintf("%s_%d", dbFile, dbId))
|
||||
require.NoError(t, os.WriteFile(msgFile, []byte("packed"), 0600))
|
||||
|
||||
require.NoError(t, client.PushSendJob(dir, &client.SendJob{
|
||||
Queue: peer.Uid,
|
||||
File: msgFile,
|
||||
Servers: []client.Server{{Url: "http://test-server.example"}},
|
||||
Timeout: 60,
|
||||
}))
|
||||
|
||||
job, _, err := client.PeekSendJob(dir, peer.Uid)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
|
||||
// Mark as Sent but intentionally leave SentAt and SuccessfulServer nil.
|
||||
job.Status = client.SendStatusSent
|
||||
require.NoError(t, client.UpdateSendJob(dir, peer.Uid, job))
|
||||
|
||||
updated := ProcessSentMessages(dir)
|
||||
assert.Equal(t, 0, updated, "incomplete job must not be counted as updated")
|
||||
|
||||
// Message should have no delivery info.
|
||||
msgs, err := peer.LoadMessagesHistory(0, 0, 50)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, msgs, 1)
|
||||
assert.Empty(t, msgs[0].ServerDeliveryUuid, "delivery UUID must not be set")
|
||||
assert.Zero(t, msgs[0].ServerDeliveryTimestamp, "delivery timestamp must not be set")
|
||||
}
|
||||
|
||||
// TestProcessSentMessages_EmptyQueues verifies that an absent or empty queues
|
||||
// directory results in 0 updates without error.
|
||||
func TestProcessSentMessages_EmptyQueues(t *testing.T) {
|
||||
dir, _ := setupMsgHelperConfig(t)
|
||||
// queues/ directory does not exist yet.
|
||||
updated := ProcessSentMessages(dir)
|
||||
assert.Equal(t, 0, updated, "no queues → 0 updates")
|
||||
|
||||
// Also test with the directory present but empty.
|
||||
require.NoError(t, os.MkdirAll(filepath.Join(dir, "queues"), 0700))
|
||||
updated = ProcessSentMessages(dir)
|
||||
assert.Equal(t, 0, updated, "empty queues → 0 updates")
|
||||
}
|
||||
|
||||
// TestProcessSentMessages_UnparseableFilename verifies that a job whose filename
|
||||
// does not follow the {dbFile}_{dbId} convention is skipped with a logged error
|
||||
// and not counted as updated.
|
||||
func TestProcessSentMessages_UnparseableFilename(t *testing.T) {
|
||||
dir, id := setupMsgHelperConfig(t)
|
||||
|
||||
peer := newFullyKeyedPeer(t, "peer-uid-nodbinfo")
|
||||
require.NoError(t, id.Peers.StorePeer(peer))
|
||||
|
||||
storeTestMessage(t, peer, "the real message")
|
||||
|
||||
// A filename with no underscore cannot be parsed as {dbFile}_{dbId}.
|
||||
msgFile := filepath.Join(dir, "badname.bin")
|
||||
require.NoError(t, os.WriteFile(msgFile, []byte("packed"), 0600))
|
||||
|
||||
require.NoError(t, client.PushSendJob(dir, &client.SendJob{
|
||||
Queue: peer.Uid,
|
||||
File: msgFile,
|
||||
Servers: []client.Server{{Url: "http://test-server.example"}},
|
||||
Timeout: 60,
|
||||
}))
|
||||
|
||||
job, _, err := client.PeekSendJob(dir, peer.Uid)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
|
||||
sentAt := time.Now()
|
||||
srvIdx := 0
|
||||
job.Status = client.SendStatusSent
|
||||
job.SentAt = &sentAt
|
||||
job.SuccessfulServer = &srvIdx
|
||||
require.NoError(t, client.UpdateSendJob(dir, peer.Uid, job))
|
||||
|
||||
// Must NOT count as updated; the real message row must be untouched.
|
||||
updated := ProcessSentMessages(dir)
|
||||
assert.Equal(t, 0, updated, "job without db info must not be counted as updated")
|
||||
|
||||
msgs, err := peer.LoadMessagesHistory(0, 0, 50)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, msgs, 1)
|
||||
assert.Empty(t, msgs[0].ServerDeliveryUuid, "delivery UUID must not be set")
|
||||
assert.Zero(t, msgs[0].ServerDeliveryTimestamp, "delivery timestamp must not be set")
|
||||
}
|
||||
|
||||
// TestProcessSentMessages_MultipleMessages verifies that all jobs in the same
|
||||
// queue are processed and that each message gets its own delivery info.
|
||||
func TestProcessSentMessages_MultipleMessages(t *testing.T) {
|
||||
dir, id := setupMsgHelperConfig(t)
|
||||
|
||||
peer := newFullyKeyedPeer(t, "peer-uid-multi")
|
||||
require.NoError(t, id.Peers.StorePeer(peer))
|
||||
|
||||
srv := client.Server{Url: "http://test-server.example"}
|
||||
|
||||
const n = 3
|
||||
for i := range n {
|
||||
storeTestMessage(t, peer, fmt.Sprintf("message-%d", i))
|
||||
pushAndMarkSent(t, dir, peer, srv)
|
||||
}
|
||||
|
||||
updated := ProcessSentMessages(dir)
|
||||
assert.Equal(t, n, updated, "all %d messages should be updated", n)
|
||||
|
||||
msgs, err := peer.LoadMessagesHistory(0, 0, 50)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, msgs, n)
|
||||
|
||||
for _, m := range msgs {
|
||||
assert.Equal(t, srv.GetUid(), m.ServerDeliveryUuid,
|
||||
"every message should have ServerDeliveryUuid set")
|
||||
assert.NotZero(t, m.ServerDeliveryTimestamp,
|
||||
"every message should have ServerDeliveryTimestamp set")
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
package helpers
|
||||
|
||||
/*
|
||||
import (
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
"forge.redroom.link/yves/meowlib/client"
|
||||
@@ -28,3 +29,4 @@ func HttpSendMessage(serverUid string, message []byte, timeout int) ([]byte, err
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
*/
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/rand"
|
||||
mrand "math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -13,6 +15,7 @@ import (
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
"github.com/ProtonMail/gopenpgp/v2/helper"
|
||||
"github.com/google/uuid"
|
||||
doubleratchet "github.com/status-im/doubleratchet"
|
||||
)
|
||||
|
||||
const maxHiddenCount = 30
|
||||
@@ -20,7 +23,7 @@ const maxHiddenCount = 30
|
||||
// Package-level random number generator with mutex for thread-safe access
|
||||
var (
|
||||
rngMu sync.Mutex
|
||||
rng = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
rng = mrand.New(mrand.NewSource(time.Now().UnixNano()))
|
||||
)
|
||||
|
||||
type Identity struct {
|
||||
@@ -96,6 +99,11 @@ func (id *Identity) InvitePeer(MyName string, ContactName string, MessageServerU
|
||||
}
|
||||
peer.Name = ContactName
|
||||
peer.InvitationId = uuid.New().String() // todo as param to identify then update url
|
||||
symKeyBytes := make([]byte, 32)
|
||||
if _, err = rand.Read(symKeyBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peer.MySymKey = base64.StdEncoding.EncodeToString(symKeyBytes)
|
||||
/* if id.MessageServers.Servers == nil {
|
||||
return nil, errors.New("no message servers defined in your identity")
|
||||
}
|
||||
@@ -115,6 +123,21 @@ func (id *Identity) InvitePeer(MyName string, ContactName string, MessageServerU
|
||||
peer.MyPullServers = MessageServerUids
|
||||
peer.MyName = MyName
|
||||
peer.InvitationMessage = InvitationMessage
|
||||
|
||||
// Generate DR keypair and root key for the initiator side
|
||||
drKp, err := doubleratchet.DefaultCrypto{}.GenerateDH()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peer.DrKpPrivate = base64.StdEncoding.EncodeToString(drKp.PrivateKey())
|
||||
peer.DrKpPublic = base64.StdEncoding.EncodeToString(drKp.PublicKey())
|
||||
drRootKey := make([]byte, 32)
|
||||
if _, err = rand.Read(drRootKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peer.DrRootKey = base64.StdEncoding.EncodeToString(drRootKey)
|
||||
peer.DrInitiator = true
|
||||
|
||||
id.Peers.StorePeer(&peer)
|
||||
|
||||
return &peer, nil
|
||||
@@ -161,6 +184,7 @@ func (id *Identity) AnswerInvitation(MyName string, ContactName string, MessageS
|
||||
peer.ContactEncryption = ReceivedContact.EncryptionPublicKey
|
||||
peer.ContactLookupKey = ReceivedContact.LookupPublicKey
|
||||
peer.ContactPublicKey = ReceivedContact.ContactPublicKey
|
||||
peer.MySymKey = ReceivedContact.SymetricKey
|
||||
peer.InvitationId = ReceivedContact.InvitationId
|
||||
peer.InvitationMessage = ReceivedContact.InvitationMessage
|
||||
for srv := range ReceivedContact.PullServers {
|
||||
@@ -179,6 +203,10 @@ func (id *Identity) AnswerInvitation(MyName string, ContactName string, MessageS
|
||||
peer.MyPullServers = MessageServerIdxs
|
||||
peer.MyName = MyName
|
||||
peer.InvitationId = ReceivedContact.InvitationId
|
||||
// Adopt DR material from the initiator's ContactCard
|
||||
peer.DrRootKey = ReceivedContact.DrRootKey
|
||||
peer.ContactDrPublicKey = ReceivedContact.DrPublicKey
|
||||
peer.DrInitiator = false
|
||||
id.Peers.StorePeer(&peer)
|
||||
|
||||
return &peer, nil
|
||||
|
||||
@@ -9,7 +9,7 @@ type InternalUserMessage struct {
|
||||
Status *meowlib.ConversationStatus `json:"conversation_status,omitempty"`
|
||||
Contact *meowlib.ContactCard `json:"contact,omitempty"`
|
||||
ServerDeliveryUuid string `json:"server_delivery_uuid,omitempty"`
|
||||
ServerDeliveryTimestamp int64 `json:"server_delivery_timestamp,omitempty"`
|
||||
ServerDeliveryTimestamp uint64 `json:"server_delivery_timestamp,omitempty"`
|
||||
//Group group
|
||||
FilePaths []string `json:"file_paths,omitempty"`
|
||||
CurrentLocation *meowlib.Location `json:"current_location,omitempty"`
|
||||
@@ -44,6 +44,10 @@ func ProcessOutboundTextMessage(peer *Peer, text string, srv *Server) ([]byte, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Persist peer to save updated DR state (DrStateJson)
|
||||
if peer.DrRootKey != "" {
|
||||
GetConfig().GetIdentity().Peers.StorePeer(peer)
|
||||
}
|
||||
// Creating Server message for transporting the user message
|
||||
toServerMessage := srv.BuildToServerMessageFromUserMessage(packedMsg)
|
||||
return srv.ProcessOutboundMessage(toServerMessage)
|
||||
|
||||
@@ -2,6 +2,7 @@ package client
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -12,7 +13,7 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func StoreMessage(peer *Peer, usermessage *meowlib.UserMessage, filenames []string, password string) error {
|
||||
func storeMessage(peer *Peer, usermessage *meowlib.UserMessage, filenames []string, password string) error {
|
||||
var dbid string
|
||||
cfg := GetConfig()
|
||||
identity := cfg.GetIdentity()
|
||||
@@ -106,7 +107,7 @@ func StoreMessage(peer *Peer, usermessage *meowlib.UserMessage, filenames []stri
|
||||
}
|
||||
|
||||
// Get new messages from a peer
|
||||
func GetNewMessages(peer *Peer, lastDbId int, password string) ([]*InternalUserMessage, error) {
|
||||
func loadNewMessages(peer *Peer, lastDbId int, password string) ([]*InternalUserMessage, error) {
|
||||
var messages []*InternalUserMessage
|
||||
cfg := GetConfig()
|
||||
identity := cfg.GetIdentity()
|
||||
@@ -164,7 +165,7 @@ func GetNewMessages(peer *Peer, lastDbId int, password string) ([]*InternalUserM
|
||||
}
|
||||
|
||||
// Get old messages from a peer
|
||||
func GetMessagesHistory(peer *Peer, inAppMsgCount int, lastDbId int, wantMore int, password string) ([]InternalUserMessage, error) {
|
||||
func loadMessagesHistory(peer *Peer, inAppMsgCount int, lastDbId int, wantMore int, password string) ([]InternalUserMessage, error) {
|
||||
var messages []InternalUserMessage
|
||||
// handle no db yet
|
||||
if len(peer.DbIds) == 0 {
|
||||
@@ -256,8 +257,9 @@ func GetDbMessage(dbFile string, dbId int64, password string) (*meowlib.DbMessag
|
||||
}
|
||||
defer rows.Close()
|
||||
var dbm meowlib.DbMessage
|
||||
found := false
|
||||
for rows.Next() {
|
||||
|
||||
found = true
|
||||
var id int64
|
||||
var m []byte
|
||||
err = rows.Scan(&id, &m)
|
||||
@@ -272,7 +274,9 @@ func GetDbMessage(dbFile string, dbId int64, password string) (*meowlib.DbMessag
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
if !found {
|
||||
return nil, fmt.Errorf("message row %d not found in %s", dbId, dbFile)
|
||||
}
|
||||
return &dbm, nil
|
||||
}
|
||||
@@ -353,6 +357,78 @@ func getMessageCount(dbid string) (int, error) {
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// SetMessageServerDelivery updates the server delivery UUID and timestamp for an existing stored message.
|
||||
func SetMessageServerDelivery(dbFile string, dbId int64, serverUid string, receiveTime uint64, password string) error {
|
||||
dbm, err := GetDbMessage(dbFile, dbId, password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dbm.ServerDeliveryUuid = serverUid
|
||||
dbm.ServerDeliveryTimestamp = receiveTime
|
||||
return UpdateDbMessage(dbm, dbFile, dbId, password)
|
||||
}
|
||||
|
||||
// FindMessageByUuid scans all DB files for a peer (newest first) and returns
|
||||
// the dbFile, row ID, and DbMessage for the message whose Status.Uuid matches.
|
||||
func FindMessageByUuid(peer *Peer, messageUuid string, password string) (string, int64, *meowlib.DbMessage, error) {
|
||||
cfg := GetConfig()
|
||||
identity := cfg.GetIdentity()
|
||||
for i := len(peer.DbIds) - 1; i >= 0; i-- {
|
||||
dbid := peer.DbIds[i]
|
||||
db, err := sql.Open("sqlite3", filepath.Join(cfg.StoragePath, identity.Uuid, dbid+GetConfig().DbSuffix))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
rows, err := db.Query("SELECT id, m FROM message ORDER BY id DESC")
|
||||
if err != nil {
|
||||
db.Close()
|
||||
continue
|
||||
}
|
||||
for rows.Next() {
|
||||
var id int64
|
||||
var m []byte
|
||||
if err := rows.Scan(&id, &m); err != nil {
|
||||
continue
|
||||
}
|
||||
decdata, err := meowlib.SymDecrypt(password, m)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var dbm meowlib.DbMessage
|
||||
if err := proto.Unmarshal(decdata, &dbm); err != nil {
|
||||
continue
|
||||
}
|
||||
if dbm.Status != nil && dbm.Status.Uuid == messageUuid {
|
||||
rows.Close()
|
||||
db.Close()
|
||||
return dbid, id, &dbm, nil
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
db.Close()
|
||||
}
|
||||
return "", 0, nil, fmt.Errorf("message with UUID %s not found", messageUuid)
|
||||
}
|
||||
|
||||
// UpdateMessageAck finds a stored outbound message by UUID and stamps it with
|
||||
// the received and/or processed timestamps from an inbound ACK message.
|
||||
func UpdateMessageAck(peer *Peer, messageUuid string, receivedAt uint64, processedAt uint64, password string) error {
|
||||
dbFile, dbId, dbm, err := FindMessageByUuid(peer, messageUuid, password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dbm.Status == nil {
|
||||
dbm.Status = &meowlib.ConversationStatus{}
|
||||
}
|
||||
if receivedAt != 0 {
|
||||
dbm.Status.Received = receivedAt
|
||||
}
|
||||
if processedAt != 0 {
|
||||
dbm.Status.Processed = processedAt
|
||||
}
|
||||
return UpdateDbMessage(dbm, dbFile, dbId, password)
|
||||
}
|
||||
|
||||
func createMessageTable(db *sql.DB) error {
|
||||
createMessageTableSQL := `CREATE TABLE message (
|
||||
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
|
||||
@@ -22,11 +22,11 @@ func TestStoreMessage(t *testing.T) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = StoreMessage(peers[0], &um, []string{}, password)
|
||||
err = storeMessage(peers[0], &um, []string{}, password)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
messages, err := GetMessagesHistory(peers[0], 0, 0, 10, password)
|
||||
messages, err := loadMessagesHistory(peers[0], 0, 0, 10, password)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -56,7 +56,7 @@ func TestManyStoreMessage(t *testing.T) {
|
||||
}
|
||||
peers, err := id.Peers.GetPeers()
|
||||
// test with zero messages
|
||||
messages, err := GetMessagesHistory(peers[0], 0, 0, 10, password)
|
||||
messages, err := loadMessagesHistory(peers[0], 0, 0, 10, password)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -64,12 +64,12 @@ func TestManyStoreMessage(t *testing.T) {
|
||||
for i := 1; i < 100; i++ {
|
||||
var um meowlib.UserMessage
|
||||
um.Data = []byte(randomLenString(20, 200))
|
||||
err := StoreMessage(peers[0], &um, []string{}, password)
|
||||
err := storeMessage(peers[0], &um, []string{}, password)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
messages, err = GetMessagesHistory(peers[0], 0, 0, 10, password)
|
||||
messages, err = loadMessagesHistory(peers[0], 0, 0, 10, password)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
doubleratchet "github.com/status-im/doubleratchet"
|
||||
"github.com/google/uuid"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
@@ -26,6 +28,7 @@ type Peer struct {
|
||||
MyIdentity *meowlib.KeyPair `json:"my_identity,omitempty"`
|
||||
MyEncryptionKp *meowlib.KeyPair `json:"my_encryption_kp,omitempty"`
|
||||
MyLookupKp *meowlib.KeyPair `json:"my_lookup_kp,omitempty"`
|
||||
MySymKey string `json:"my_sym_key,omitempty"`
|
||||
MyPullServers []string `json:"my_pull_servers,omitempty"`
|
||||
// Peer keys and infos
|
||||
//Contact meowlib.ContactCard `json:"contact,omitempty"` // todo : remove
|
||||
@@ -40,6 +43,8 @@ type Peer struct {
|
||||
LastMessage *InternalUserMessage `json:"last_message,omitempty"`
|
||||
// Internal management attributes
|
||||
Visible bool `json:"visible,omitempty"`
|
||||
SendDeliveryAck bool `json:"send_delivery_ack,omitempty"`
|
||||
SendProcessingAck bool `json:"send_processing_ack,omitempty"`
|
||||
VisiblePassword string `json:"visible_password,omitempty"`
|
||||
PasswordType string `json:"password_type,omitempty"`
|
||||
Blocked bool `json:"blocked,omitempty"`
|
||||
@@ -51,7 +56,14 @@ type Peer struct {
|
||||
DbIds []string `json:"db_ids,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
PersonnaeDbId string `json:"personnae_db_id,omitempty"`
|
||||
dbPassword string
|
||||
// Double Ratchet state
|
||||
DrKpPublic string `json:"dr_kp_public,omitempty"`
|
||||
DrKpPrivate string `json:"dr_kp_private,omitempty"`
|
||||
DrRootKey string `json:"dr_root_key,omitempty"`
|
||||
DrInitiator bool `json:"dr_initiator,omitempty"`
|
||||
ContactDrPublicKey string `json:"contact_dr_public_key,omitempty"`
|
||||
DrStateJson string `json:"dr_state_json,omitempty"`
|
||||
dbPassword string
|
||||
}
|
||||
|
||||
//
|
||||
@@ -70,6 +82,9 @@ func (p *Peer) GetMyContact() *meowlib.ContactCard {
|
||||
c.InvitationId = p.InvitationId
|
||||
c.InvitationMessage = p.InvitationMessage
|
||||
c.Name = p.MyName
|
||||
c.SymetricKey = p.MySymKey
|
||||
c.DrRootKey = p.DrRootKey
|
||||
c.DrPublicKey = p.DrKpPublic
|
||||
return &c
|
||||
}
|
||||
|
||||
@@ -196,6 +211,24 @@ func (p *Peer) DeserializeUserMessage(data []byte) (*meowlib.UserMessage, error)
|
||||
return &msg, nil
|
||||
}
|
||||
|
||||
// SymEncryptPayload applies the shared symmetric key over already-encrypted data.
|
||||
// If MySymKey is empty, data is returned unchanged (peer has no symkey configured).
|
||||
func (p *Peer) SymEncryptPayload(data []byte) ([]byte, error) {
|
||||
if p.MySymKey == "" {
|
||||
return data, nil
|
||||
}
|
||||
return meowlib.SymEncrypt(p.MySymKey, data)
|
||||
}
|
||||
|
||||
// SymDecryptPayload removes the outer symmetric encryption layer.
|
||||
// If MySymKey is empty, data is returned unchanged.
|
||||
func (p *Peer) SymDecryptPayload(data []byte) ([]byte, error) {
|
||||
if p.MySymKey == "" {
|
||||
return data, nil
|
||||
}
|
||||
return meowlib.SymDecrypt(p.MySymKey, data)
|
||||
}
|
||||
|
||||
// AsymEncryptMessage prepares a message to send to a specific peer contact
|
||||
func (p *Peer) AsymEncryptMessage(Message []byte) (*meowlib.EncryptedMessage, error) {
|
||||
var enc *meowlib.EncryptedMessage
|
||||
@@ -256,19 +289,66 @@ func (p *Peer) ProcessOutboundUserMessage(usermessage *meowlib.UserMessage) (*me
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Encrypting it
|
||||
// Asymmetric encryption + signature (inner layer)
|
||||
enc, err := p.AsymEncryptMessage(serializedMessage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Packing it
|
||||
packedMsg := p.PackUserMessage(enc.Data, enc.Signature)
|
||||
// Symmetric encryption (middle layer, if symkey is configured)
|
||||
symEncrypted, err := p.SymEncryptPayload(enc.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Double Ratchet encryption (outermost layer, if DR is configured)
|
||||
if p.DrRootKey != "" {
|
||||
session, err := p.GetDRSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drMsg, err := session.RatchetEncrypt(symEncrypted, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headerBytes, err := json.Marshal(drMsg.Header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packed := p.PackUserMessage(drMsg.Ciphertext, enc.Signature)
|
||||
packed.DrHeader = headerBytes
|
||||
return packed, nil
|
||||
}
|
||||
// No DR layer
|
||||
packedMsg := p.PackUserMessage(symEncrypted, enc.Signature)
|
||||
return packedMsg, nil
|
||||
}
|
||||
|
||||
// ProcessInboundUserMessage is a helper function that decrypts and deserializes a user message
|
||||
func (p *Peer) ProcessInboundUserMessage(message []byte, signature []byte) (*meowlib.UserMessage, error) {
|
||||
dec, err := p.AsymDecryptMessage(message, signature)
|
||||
func (p *Peer) ProcessInboundUserMessage(packed *meowlib.PackedUserMessage) (*meowlib.UserMessage, error) {
|
||||
payload := packed.Payload
|
||||
// Double Ratchet decryption (outermost layer), only when DR is configured and header present
|
||||
if p.DrRootKey != "" && len(packed.DrHeader) > 0 {
|
||||
session, err := p.GetDRSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var header doubleratchet.MessageHeader
|
||||
if err := json.Unmarshal(packed.DrHeader, &header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err = session.RatchetDecrypt(
|
||||
doubleratchet.Message{Header: header, Ciphertext: packed.Payload},
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Symmetric decryption (middle layer, if symkey is configured)
|
||||
symDecrypted, err := p.SymDecryptPayload(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dec, err := p.AsymDecryptMessage(symDecrypted, packed.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -300,7 +380,7 @@ func (p *Peer) StoreMessage(msg *meowlib.UserMessage, filenames []string) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return StoreMessage(p, msg, filenames, password)
|
||||
return storeMessage(p, msg, filenames, password)
|
||||
}
|
||||
|
||||
func (p *Peer) GetFilePreview(filename string) ([]byte, error) {
|
||||
@@ -320,7 +400,7 @@ func (p *Peer) LoadMessagesHistory(alreadyLoadedCount int, oldestMessageId int,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetMessagesHistory(p, alreadyLoadedCount, oldestMessageId, qty, password)
|
||||
return loadMessagesHistory(p, alreadyLoadedCount, oldestMessageId, qty, password)
|
||||
|
||||
}
|
||||
|
||||
@@ -329,7 +409,7 @@ func (p *Peer) LoadNewMessages(lastMessageId int) ([]*InternalUserMessage, error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetNewMessages(p, lastMessageId, password)
|
||||
return loadNewMessages(p, lastMessageId, password)
|
||||
}
|
||||
|
||||
func (p *Peer) LoadMessage(uid string) (*InternalUserMessage, error) {
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"forge.redroom.link/yves/meowlib"
|
||||
doubleratchet "github.com/status-im/doubleratchet"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -421,7 +424,7 @@ func TestProcessOutboundInbound_RoundTrip(t *testing.T) {
|
||||
assert.NotEmpty(t, packed.Signature)
|
||||
assert.Equal(t, bob.MyLookupKp.Public, packed.Destination)
|
||||
|
||||
received, err := bob.ProcessInboundUserMessage(packed.Payload, packed.Signature)
|
||||
received, err := bob.ProcessInboundUserMessage(packed)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("end to end test"), received.Data)
|
||||
assert.Equal(t, alice.MyIdentity.Public, received.From)
|
||||
@@ -436,7 +439,7 @@ func TestProcessOutboundInbound_EmptyMessage(t *testing.T) {
|
||||
packed, err := alice.ProcessOutboundUserMessage(userMsg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
received, err := bob.ProcessInboundUserMessage(packed.Payload, packed.Signature)
|
||||
received, err := bob.ProcessInboundUserMessage(packed)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, received.Data)
|
||||
}
|
||||
@@ -452,6 +455,74 @@ func TestProcessOutboundUserMessage_InvalidKey(t *testing.T) {
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DR-encrypted round-trip
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func makeDRPeerPair(t *testing.T) (alice *Peer, bob *Peer) {
|
||||
t.Helper()
|
||||
alice, bob = makePeerPair(t)
|
||||
|
||||
// Generate DR keypair for alice (initiator)
|
||||
drKp, err := doubleratchet.DefaultCrypto{}.GenerateDH()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
drRootKeyBytes := make([]byte, 32)
|
||||
if _, err = rand.Read(drRootKeyBytes); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
drRootKey := base64.StdEncoding.EncodeToString(drRootKeyBytes)
|
||||
|
||||
alice.DrKpPrivate = base64.StdEncoding.EncodeToString(drKp.PrivateKey())
|
||||
alice.DrKpPublic = base64.StdEncoding.EncodeToString(drKp.PublicKey())
|
||||
alice.DrRootKey = drRootKey
|
||||
alice.DrInitiator = true
|
||||
|
||||
bob.DrRootKey = drRootKey
|
||||
bob.ContactDrPublicKey = alice.DrKpPublic
|
||||
bob.DrInitiator = false
|
||||
|
||||
return alice, bob
|
||||
}
|
||||
|
||||
func TestProcessOutboundInbound_DR_RoundTrip(t *testing.T) {
|
||||
alice, bob := makeDRPeerPair(t)
|
||||
|
||||
userMsg, err := alice.BuildSimpleUserMessage([]byte("dr round trip test"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
packed, err := alice.ProcessOutboundUserMessage(userMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, packed.DrHeader, "DR header should be set")
|
||||
|
||||
received, err := bob.ProcessInboundUserMessage(packed)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("dr round trip test"), received.Data)
|
||||
|
||||
// Verify DR state was updated
|
||||
assert.NotEmpty(t, alice.DrStateJson, "alice DR state should be persisted")
|
||||
assert.NotEmpty(t, bob.DrStateJson, "bob DR state should be persisted")
|
||||
}
|
||||
|
||||
func TestProcessOutboundInbound_DR_MultipleMessages(t *testing.T) {
|
||||
alice, bob := makeDRPeerPair(t)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
msg := []byte("message " + strconv.Itoa(i))
|
||||
userMsg, err := alice.BuildSimpleUserMessage(msg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
packed, err := alice.ProcessOutboundUserMessage(userMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, packed.DrHeader)
|
||||
|
||||
received, err := bob.ProcessInboundUserMessage(packed)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg, received.Data)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// GetConversationRequest
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@@ -238,6 +238,9 @@ func (ps *PeerStorage) FinalizeInvitation(ReceivedContact *meowlib.ContactCard)
|
||||
ps.cache[i].ContactEncryption = ReceivedContact.EncryptionPublicKey
|
||||
ps.cache[i].ContactLookupKey = ReceivedContact.LookupPublicKey
|
||||
ps.cache[i].ContactPublicKey = ReceivedContact.ContactPublicKey
|
||||
if ps.cache[i].MySymKey == "" {
|
||||
ps.cache[i].MySymKey = ReceivedContact.SymetricKey
|
||||
}
|
||||
srvs := []string{}
|
||||
for srv := range ReceivedContact.PullServers {
|
||||
srvs = append(srvs, ReceivedContact.PullServers[srv].GetUid())
|
||||
|
||||
357
client/sendjobs.go
Normal file
357
client/sendjobs.go
Normal file
@@ -0,0 +1,357 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// SendStatus represents the delivery state of a queued send job.
|
||||
type SendStatus int
|
||||
|
||||
const (
|
||||
SendStatusPending SendStatus = 0 // waiting to be sent
|
||||
SendStatusSent SendStatus = 1 // successfully delivered
|
||||
SendStatusFailed SendStatus = 2 // all servers exhausted or timed out
|
||||
)
|
||||
|
||||
// SendJob describes a message to send, together with its delivery tracking state.
|
||||
//
|
||||
// The File field holds the path to an outbox file written by CreateUserMessageAndSendJob.
|
||||
// It must follow the naming convention outbox/{dbFile}_{dbId} so that
|
||||
// ProcessSentMessages can recover the message DB location from the filename alone.
|
||||
// Servers is tried in order; after MaxRetriesPerServer failures on one server
|
||||
// the next one is attempted.
|
||||
//
|
||||
// Tracking fields (ID, InsertedAt, Status, SentAt, Retries, SuccessfulServer)
|
||||
// are managed by the queue functions and must not be set by the caller.
|
||||
type SendJob struct {
|
||||
// --- caller-supplied fields ---
|
||||
Queue string `json:"queue,omitempty"` // uid of destination peer, used for naming the queue sqlite db
|
||||
File string `json:"file,omitempty"` // outbox file path; basename must be {dbFile}_{dbId}
|
||||
Servers []Server `json:"servers,omitempty"`
|
||||
Timeout int `json:"timeout,omitempty"` // seconds; 0 = no timeout
|
||||
|
||||
// --- DB-managed tracking fields (not serialised by the caller) ---
|
||||
ID int64
|
||||
InsertedAt time.Time
|
||||
Status SendStatus
|
||||
SentAt *time.Time
|
||||
Retries []int // retry count per server index
|
||||
SuccessfulServer *int // index into Servers of the server that accepted
|
||||
}
|
||||
|
||||
func sendQueueDbPath(storagePath, queue string) string {
|
||||
return filepath.Join(storagePath, "queues", queue)
|
||||
}
|
||||
|
||||
func openOrCreateSendQueue(dbPath string) (*sql.DB, error) {
|
||||
dir := filepath.Dir(dbPath)
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
f, err := os.Create(dbPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS queue (
|
||||
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
file TEXT NOT NULL,
|
||||
servers TEXT NOT NULL,
|
||||
timeout INTEGER NOT NULL DEFAULT 0,
|
||||
inserted_at INTEGER NOT NULL,
|
||||
status INTEGER NOT NULL DEFAULT 0,
|
||||
sent_at INTEGER,
|
||||
retries TEXT NOT NULL DEFAULT '[]',
|
||||
successful_server INTEGER
|
||||
)`)
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// PushSendJob appends a SendJob to the SQLite queue identified by job.Queue inside storagePath.
|
||||
// The initial retry counters are set to zero for each server.
|
||||
func PushSendJob(storagePath string, job *SendJob) error {
|
||||
db, err := openOrCreateSendQueue(sendQueueDbPath(storagePath, job.Queue))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
serversJSON, err := json.Marshal(job.Servers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
retriesJSON, err := json.Marshal(make([]int, len(job.Servers)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = db.Exec(
|
||||
`INSERT INTO queue(file, servers, timeout, inserted_at, status, retries)
|
||||
VALUES(?,?,?,?,?,?)`,
|
||||
job.File, string(serversJSON), job.Timeout, time.Now().Unix(), SendStatusPending, string(retriesJSON),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// PeekSendJob returns the oldest pending SendJob from the named queue.
|
||||
// Returns nil, 0, nil when the queue has no pending jobs.
|
||||
func PeekSendJob(storagePath, queue string) (*SendJob, int64, error) {
|
||||
db, err := openOrCreateSendQueue(sendQueueDbPath(storagePath, queue))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var (
|
||||
id int64
|
||||
file string
|
||||
serversJSON string
|
||||
timeout int
|
||||
insertedAt int64
|
||||
status SendStatus
|
||||
sentAt sql.NullInt64
|
||||
retriesJSON string
|
||||
successfulServer sql.NullInt64
|
||||
)
|
||||
err = db.QueryRow(
|
||||
`SELECT id, file, servers, timeout, inserted_at, status, sent_at, retries, successful_server
|
||||
FROM queue WHERE status = ? ORDER BY id ASC LIMIT 1`,
|
||||
SendStatusPending,
|
||||
).Scan(&id, &file, &serversJSON, &timeout, &insertedAt, &status, &sentAt, &retriesJSON, &successfulServer)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
var servers []Server
|
||||
if err := json.Unmarshal([]byte(serversJSON), &servers); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var retries []int
|
||||
if err := json.Unmarshal([]byte(retriesJSON), &retries); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
job := &SendJob{
|
||||
ID: id,
|
||||
Queue: queue,
|
||||
File: file,
|
||||
Servers: servers,
|
||||
Timeout: timeout,
|
||||
InsertedAt: time.Unix(insertedAt, 0),
|
||||
Status: status,
|
||||
Retries: retries,
|
||||
}
|
||||
if sentAt.Valid {
|
||||
t := time.Unix(sentAt.Int64, 0)
|
||||
job.SentAt = &t
|
||||
}
|
||||
if successfulServer.Valid {
|
||||
v := int(successfulServer.Int64)
|
||||
job.SuccessfulServer = &v
|
||||
}
|
||||
return job, id, nil
|
||||
}
|
||||
|
||||
// UpdateSendJob persists the tracking fields (status, sent_at, retries, successful_server)
|
||||
// for a job that was previously returned by PeekSendJob.
|
||||
func UpdateSendJob(storagePath, queue string, job *SendJob) error {
|
||||
db, err := openOrCreateSendQueue(sendQueueDbPath(storagePath, queue))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
retriesJSON, err := json.Marshal(job.Retries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var sentAt any
|
||||
if job.SentAt != nil {
|
||||
sentAt = job.SentAt.Unix()
|
||||
}
|
||||
var successfulServer any
|
||||
if job.SuccessfulServer != nil {
|
||||
successfulServer = *job.SuccessfulServer
|
||||
}
|
||||
_, err = db.Exec(
|
||||
`UPDATE queue SET status=?, sent_at=?, retries=?, successful_server=? WHERE id=?`,
|
||||
job.Status, sentAt, string(retriesJSON), successfulServer, job.ID,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetSendJob retrieves any job by row id regardless of its status.
|
||||
// Returns nil, nil when no row with that id exists.
|
||||
func GetSendJob(storagePath, queue string, id int64) (*SendJob, error) {
|
||||
db, err := openOrCreateSendQueue(sendQueueDbPath(storagePath, queue))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var (
|
||||
file string
|
||||
serversJSON string
|
||||
timeout int
|
||||
insertedAt int64
|
||||
status SendStatus
|
||||
sentAt sql.NullInt64
|
||||
retriesJSON string
|
||||
successfulServer sql.NullInt64
|
||||
)
|
||||
err = db.QueryRow(
|
||||
`SELECT file, servers, timeout, inserted_at, status, sent_at, retries, successful_server
|
||||
FROM queue WHERE id = ?`,
|
||||
id,
|
||||
).Scan(&file, &serversJSON, &timeout, &insertedAt, &status, &sentAt, &retriesJSON, &successfulServer)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var servers []Server
|
||||
if err := json.Unmarshal([]byte(serversJSON), &servers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var retries []int
|
||||
if err := json.Unmarshal([]byte(retriesJSON), &retries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
job := &SendJob{
|
||||
ID: id,
|
||||
Queue: queue,
|
||||
File: file,
|
||||
Servers: servers,
|
||||
Timeout: timeout,
|
||||
InsertedAt: time.Unix(insertedAt, 0),
|
||||
Status: status,
|
||||
Retries: retries,
|
||||
}
|
||||
if sentAt.Valid {
|
||||
t := time.Unix(sentAt.Int64, 0)
|
||||
job.SentAt = &t
|
||||
}
|
||||
if successfulServer.Valid {
|
||||
v := int(successfulServer.Int64)
|
||||
job.SuccessfulServer = &v
|
||||
}
|
||||
return job, nil
|
||||
}
|
||||
|
||||
// GetSentJobs returns all successfully-sent jobs from the named queue,
|
||||
// ordered oldest first. Use this to reconcile delivery status with the
|
||||
// message store and clean up completed entries.
|
||||
func GetSentJobs(storagePath, queue string) ([]*SendJob, error) {
|
||||
db, err := openOrCreateSendQueue(sendQueueDbPath(storagePath, queue))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
rows, err := db.Query(
|
||||
`SELECT id, file, servers, timeout, inserted_at, sent_at, retries, successful_server
|
||||
FROM queue WHERE status = ? ORDER BY id ASC`,
|
||||
SendStatusSent,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var jobs []*SendJob
|
||||
for rows.Next() {
|
||||
var (
|
||||
id int64
|
||||
file string
|
||||
serversJSON string
|
||||
timeout int
|
||||
insertedAt int64
|
||||
sentAt sql.NullInt64
|
||||
retriesJSON string
|
||||
successfulServer sql.NullInt64
|
||||
)
|
||||
if err := rows.Scan(&id, &file, &serversJSON, &timeout, &insertedAt, &sentAt, &retriesJSON, &successfulServer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var servers []Server
|
||||
if err := json.Unmarshal([]byte(serversJSON), &servers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var retries []int
|
||||
if err := json.Unmarshal([]byte(retriesJSON), &retries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
job := &SendJob{
|
||||
ID: id,
|
||||
Queue: queue,
|
||||
File: file,
|
||||
Servers: servers,
|
||||
Timeout: timeout,
|
||||
InsertedAt: time.Unix(insertedAt, 0),
|
||||
Status: SendStatusSent,
|
||||
Retries: retries,
|
||||
}
|
||||
if sentAt.Valid {
|
||||
t := time.Unix(sentAt.Int64, 0)
|
||||
job.SentAt = &t
|
||||
}
|
||||
if successfulServer.Valid {
|
||||
v := int(successfulServer.Int64)
|
||||
job.SuccessfulServer = &v
|
||||
}
|
||||
jobs = append(jobs, job)
|
||||
}
|
||||
return jobs, nil
|
||||
}
|
||||
|
||||
// DeleteSendJob removes a row by id from the named queue.
|
||||
// If the queue is empty after deletion, the DB file is removed.
|
||||
func DeleteSendJob(storagePath, queue string, id int64) error {
|
||||
dbPath := sendQueueDbPath(storagePath, queue)
|
||||
db, err := openOrCreateSendQueue(dbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = db.Exec(`DELETE FROM queue WHERE id=?`, id); err != nil {
|
||||
db.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
var count int
|
||||
if err = db.QueryRow(`SELECT COUNT(*) FROM queue`).Scan(&count); err != nil {
|
||||
db.Close()
|
||||
return err
|
||||
}
|
||||
db.Close()
|
||||
|
||||
if count == 0 {
|
||||
return os.Remove(dbPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
184
client/sendjobs_test.go
Normal file
184
client/sendjobs_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// helpers ----------------------------------------------------------------
|
||||
|
||||
func makeServers(urls ...string) []Server {
|
||||
out := make([]Server, len(urls))
|
||||
for i, u := range urls {
|
||||
out[i] = Server{Url: u}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func pushJob(t *testing.T, dir, queue, file string, servers []Server, timeout int) {
|
||||
t.Helper()
|
||||
require.NoError(t, PushSendJob(dir, &SendJob{
|
||||
Queue: queue,
|
||||
File: file,
|
||||
Servers: servers,
|
||||
Timeout: timeout,
|
||||
}))
|
||||
}
|
||||
|
||||
// tests ------------------------------------------------------------------
|
||||
|
||||
func TestPushAndPeekSendJob(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
servers := makeServers("http://s1.example", "http://s2.example")
|
||||
pushJob(t, dir, "q1", "/tmp/msg", servers, 60)
|
||||
|
||||
got, id, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
|
||||
assert.Greater(t, id, int64(0))
|
||||
assert.Equal(t, "/tmp/msg", got.File)
|
||||
assert.Equal(t, 60, got.Timeout)
|
||||
assert.Equal(t, SendStatusPending, got.Status)
|
||||
assert.Nil(t, got.SentAt)
|
||||
assert.Nil(t, got.SuccessfulServer)
|
||||
assert.Len(t, got.Retries, 2)
|
||||
assert.Equal(t, 0, got.Retries[0])
|
||||
assert.Equal(t, 0, got.Retries[1])
|
||||
assert.WithinDuration(t, time.Now(), got.InsertedAt, 5*time.Second)
|
||||
}
|
||||
|
||||
func TestPeekSendJob_EmptyQueue(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
got, id, err := PeekSendJob(dir, "empty")
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, got)
|
||||
assert.Equal(t, int64(0), id)
|
||||
}
|
||||
|
||||
func TestPeekSendJob_OldestFirst(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
for _, f := range []string{"/a", "/b", "/c"} {
|
||||
pushJob(t, dir, "q1", f, makeServers("http://s1"), 0)
|
||||
}
|
||||
|
||||
got, _, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
assert.Equal(t, "/a", got.File)
|
||||
}
|
||||
|
||||
func TestPeekSendJob_SkipsNonPending(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
for _, f := range []string{"/a", "/b", "/c"} {
|
||||
pushJob(t, dir, "q1", f, makeServers("http://s1"), 0)
|
||||
}
|
||||
|
||||
// mark first as sent
|
||||
first, _, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
first.Status = SendStatusSent
|
||||
require.NoError(t, UpdateSendJob(dir, "q1", first))
|
||||
|
||||
// mark second as failed
|
||||
second, _, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
second.Status = SendStatusFailed
|
||||
require.NoError(t, UpdateSendJob(dir, "q1", second))
|
||||
|
||||
// only /c is still pending
|
||||
got, _, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
assert.Equal(t, "/c", got.File)
|
||||
}
|
||||
|
||||
func TestUpdateSendJob_Sent(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
pushJob(t, dir, "q1", "/tmp/f", makeServers("http://s1"), 10)
|
||||
|
||||
job, id, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
|
||||
now := time.Now()
|
||||
srvIdx := 0
|
||||
job.Status = SendStatusSent
|
||||
job.SentAt = &now
|
||||
job.SuccessfulServer = &srvIdx
|
||||
require.NoError(t, UpdateSendJob(dir, "q1", job))
|
||||
|
||||
// persisted correctly
|
||||
got, err := GetSendJob(dir, "q1", id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
assert.Equal(t, SendStatusSent, got.Status)
|
||||
assert.NotNil(t, got.SentAt)
|
||||
assert.WithinDuration(t, now, *got.SentAt, time.Second)
|
||||
require.NotNil(t, got.SuccessfulServer)
|
||||
assert.Equal(t, 0, *got.SuccessfulServer)
|
||||
|
||||
// no more pending jobs
|
||||
pending, _, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, pending)
|
||||
}
|
||||
|
||||
func TestUpdateSendJob_Retries(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
pushJob(t, dir, "q1", "/tmp/f", makeServers("http://s1", "http://s2"), 10)
|
||||
|
||||
job, id, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
|
||||
job.Retries[0] = 2
|
||||
require.NoError(t, UpdateSendJob(dir, "q1", job))
|
||||
|
||||
got, err := GetSendJob(dir, "q1", id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
assert.Equal(t, SendStatusPending, got.Status) // still pending
|
||||
assert.Equal(t, 2, got.Retries[0])
|
||||
assert.Equal(t, 0, got.Retries[1])
|
||||
}
|
||||
|
||||
func TestGetSendJob_NotFound(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
pushJob(t, dir, "q1", "/tmp/f", makeServers("http://s1"), 0)
|
||||
|
||||
got, err := GetSendJob(dir, "q1", 9999)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, got)
|
||||
}
|
||||
|
||||
func TestDeleteSendJob_KeepsDbWhenNotEmpty(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
pushJob(t, dir, "q1", "/a", makeServers("http://s1"), 0)
|
||||
pushJob(t, dir, "q1", "/b", makeServers("http://s1"), 0)
|
||||
|
||||
_, id, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, DeleteSendJob(dir, "q1", id))
|
||||
|
||||
// DB file must still exist (second row remains)
|
||||
_, statErr := os.Stat(filepath.Join(dir, "queues", "q1"))
|
||||
require.NoError(t, statErr)
|
||||
}
|
||||
|
||||
func TestDeleteSendJob_RemovesDbWhenEmpty(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
pushJob(t, dir, "q1", "/a", makeServers("http://s1"), 0)
|
||||
|
||||
_, id, err := PeekSendJob(dir, "q1")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, DeleteSendJob(dir, "q1", id))
|
||||
|
||||
_, statErr := os.Stat(filepath.Join(dir, "queues", "q1"))
|
||||
assert.True(t, os.IsNotExist(statErr), "DB file should be removed when queue is empty")
|
||||
}
|
||||
7
doc/messaging/sq_msg02_bgpoll.puml
Normal file
7
doc/messaging/sq_msg02_bgpoll.puml
Normal file
@@ -0,0 +1,7 @@
|
||||
@startuml
|
||||
ClientFdThread -> Lib : write poll job list
|
||||
ClientFdThread -> ClientBgThread : notify job ?
|
||||
ClientBgThread -> Lib : poll for servers
|
||||
ClientBgThread -> ClientFdThread : notify message here
|
||||
ClientFdThread -> Lib : Read received message and update db
|
||||
@enduml
|
||||
7
doc/messaging/sq_msg02_bgsend.puml
Normal file
7
doc/messaging/sq_msg02_bgsend.puml
Normal file
@@ -0,0 +1,7 @@
|
||||
@startuml
|
||||
ClientFdThread -> Lib : write msg to db, encrypted msg for user to file, and job file
|
||||
ClientFdThread -> ClientBgThread : notify job
|
||||
ClientBgThread -> Lib : encrypt for server(s) and send including retries
|
||||
ClientBgThread -> Lib: notify send result
|
||||
ClientFdThread -> Lib : Read job report and update db
|
||||
@enduml
|
||||
508
doc/multi_device_sync_plan.md
Normal file
508
doc/multi_device_sync_plan.md
Normal file
@@ -0,0 +1,508 @@
|
||||
# Multi-Device Conversation Sync — Implementation Plan
|
||||
|
||||
## Context
|
||||
|
||||
meowlib already has scaffolding for multi-device sync:
|
||||
|
||||
| Existing artefact | Where |
|
||||
|---|---|
|
||||
| `Identity.Device *KeyPair` | `client/identity.go:35` |
|
||||
| `Identity.OwnedDevices PeerList` | `client/identity.go:40` |
|
||||
| `Peer.Type string` | `client/peer.go:52` |
|
||||
| `ToServerMessage.device_messages` (field 10) | `pb/messages.proto:75` |
|
||||
| `FromServerMessage.device_messages` (field 9) | `pb/messages.proto:99` |
|
||||
| `BackgroundJob.Device *KeyPair` | `client/identity.go:334` |
|
||||
|
||||
The server (`server/router.go`) does **not** yet implement `device_messages` routing; it goes through `messages`/`Chat` today.
|
||||
|
||||
---
|
||||
|
||||
## Chosen Sync Scheme: Event-Driven Delta Sync over Existing Message Infrastructure
|
||||
|
||||
### Rationale
|
||||
|
||||
| Approach | Pros | Cons | Verdict |
|
||||
|---|---|---|---|
|
||||
| Full DB sync | Complete history | Huge payloads, merge conflicts, wasteful | ❌ |
|
||||
| Inbox/outbox file sharing | Simple to reason about | File-level granularity, no dedup, breaks privacy model | ❌ |
|
||||
| **Event-driven delta sync** | Minimal data, no merge needed, reuses existing crypto + server stack | Requires dedup table | ✅ |
|
||||
|
||||
Each message event (received, sent, status change) is forwarded immediately to sibling devices through the **same server infrastructure** as regular peer messages. Each device maintains its own complete local DB. Convergence is eventual; dedup via `ConversationStatus.Uuid`.
|
||||
|
||||
### Key Design Decisions
|
||||
|
||||
1. **Zero server changes required.** Device sync messages are addressed to the sibling device's lookup key and travel through the existing `msg:{lookup_key}` Redis sorted-set on the server, returned in `from_server.Chat` — identical to peer messages.
|
||||
|
||||
2. **Device peers reuse the `Peer` struct** with `Type = "device"`, stored in `Identity.OwnedDevices`. They have their own three keypairs (`MyIdentity`, `MyEncryptionKp`, `MyLookupKp`) and `MyPullServers`.
|
||||
|
||||
3. **A new proto message `DeviceSyncPayload`** is added to `messages.proto`. It is serialised and placed in `UserMessage.Appdata`; the parent `UserMessage.Type` is set to `"device_sync"`. This lets the client recognise sync messages without any server-side awareness.
|
||||
|
||||
4. **`GetRequestJobs()`** is extended to include device lookup keys alongside peer lookup keys for the appropriate servers, so the background poll thread picks up device sync messages without any extra call.
|
||||
|
||||
5. **Dedup** is handled by a small SQLite table `device_sync_seen` (one table per identity folder, not per peer) keyed on `DeviceSyncPayload.DedupId`.
|
||||
|
||||
---
|
||||
|
||||
## New Protobuf Messages
|
||||
|
||||
Add to `pb/messages.proto` before re-generating:
|
||||
|
||||
```protobuf
|
||||
// Payload carried inside UserMessage.appdata for device-to-device sync.
|
||||
// The enclosing UserMessage.type MUST be "device_sync".
|
||||
message DeviceSyncPayload {
|
||||
string sync_type = 1; // "msg" | "status" | "peer_update" | "identity_update" | "server_add" | "forward"
|
||||
string peer_uid = 2; // local UID of the peer conversation on the sending device
|
||||
DbMessage db_message = 3; // the DbMessage to replicate (sync_type "msg" / "status")
|
||||
string dedup_id = 4; // globally unique ID (= DbMessage.status.uuid or generated)
|
||||
bytes peer_data = 5; // JSON-encoded Peer snapshot (sync_type "peer_update")
|
||||
bytes identity_data = 6; // JSON-encoded identity profile snapshot (sync_type "identity_update")
|
||||
bytes forward_payload = 7; // serialized UserMessage for primary to send on behalf of sibling (sync_type "forward")
|
||||
string forward_peer_uid = 8; // primary-side peer UID to forward to (sync_type "forward")
|
||||
}
|
||||
```
|
||||
|
||||
Run `cd pb && ./protogen.sh` after adding this.
|
||||
|
||||
---
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1 — Device Pairing
|
||||
|
||||
**Files to touch:** `client/identity.go`, `client/helpers/` (new file `deviceHelper.go`)
|
||||
|
||||
**Goal:** Allow two app instances owned by the same user to establish a shared keypair relationship, mirroring the peer invitation flow but flagging the peer as `Type = "device"`.
|
||||
|
||||
#### 1.1 `Identity.InitDevicePairing(myDeviceName string, serverUids []string) (*Peer, error)`
|
||||
- Identical to `InvitePeer` but sets `peer.Type = "device"`.
|
||||
- Stores the resulting peer in `Identity.OwnedDevices` (not `Peers`).
|
||||
- Returns the peer so the caller can produce a `ContactCard` QR/file.
|
||||
- **Sym + DR inherited automatically**: because the implementation mirrors `InvitePeer`, the device peer will have `MySymKey`, `DrKpPublic`, `DrKpPrivate`, `DrRootKey`, and `DrInitiator = true` populated automatically. The resulting `ContactCard` will carry `dr_root_key` and `dr_public_key` so the answering device can initialise its own DR session via `AnswerDevicePairing`.
|
||||
|
||||
#### 1.2 `Identity.AnswerDevicePairing(myDeviceName string, receivedContact *meowlib.ContactCard) (*Peer, error)`
|
||||
- Mirrors `AnswerInvitation`, stores in `OwnedDevices`.
|
||||
|
||||
#### 1.3 `Identity.FinalizeDevicePairing(receivedContact *meowlib.ContactCard) error`
|
||||
- Mirrors `FinalizeInvitation`, operates on `OwnedDevices`.
|
||||
|
||||
#### 1.4 Helper functions (new file `client/helpers/deviceHelper.go`)
|
||||
```go
|
||||
// DevicePairingCreateMessage – wraps an invitation step-1 for a device peer.
|
||||
func DevicePairingCreateMessage(peer *client.Peer, serverUid string) ([]byte, string, error)
|
||||
|
||||
// DevicePairingAnswerMessage – wraps invitation step-3 answer for a device peer.
|
||||
func DevicePairingAnswerMessage(peer *client.Peer, serverUid string) ([]byte, string, error)
|
||||
```
|
||||
These reuse `invitationCreateHelper.go`/`invitationAnswerHelper.go` logic.
|
||||
|
||||
#### 1.5 Extend `PeerStorage` operations for OwnedDevices
|
||||
`OwnedDevices` is currently a `PeerList` (in-memory slice). This **must** be migrated to the same Badger-backed `PeerStorage` mechanism as `Peers` — it is no longer optional. Device peers carry a Double Ratchet session state (`DrStateJson`) that advances with every message sent or received. Without persistent storage the DR state is lost on restart, breaking the decryption of all subsequent messages. Add a `DeviceStorage PeerStorage` field to `Identity` with its own `DbFile`, and ensure `StorePeer` is called on the device peer after every outbound dispatch (in `DispatchSyncToDevices`) and after every inbound consume (in `ConsumeDeviceSyncMessage`), mirroring the pattern used in `messageHelper.go` and `bgPollHelper.go` for regular peers.
|
||||
|
||||
---
|
||||
|
||||
### Phase 2 — Sync Payload Helpers
|
||||
|
||||
**Files to touch:** `client/helpers/deviceHelper.go` (continued), `client/dbmessage.go`
|
||||
|
||||
#### 2.1 Build a sync message for one sibling device
|
||||
|
||||
```go
|
||||
// BuildDeviceSyncMessage wraps a DbMessage into a UserMessage addressed to a
|
||||
// sibling device peer. The caller then calls peer.ProcessOutboundUserMessage.
|
||||
func BuildDeviceSyncMessage(
|
||||
devicePeer *client.Peer,
|
||||
	syncType string, // "msg" | "status" | "peer_update"
|
||||
peerUid string,
|
||||
dbm *meowlib.DbMessage,
|
||||
dedupId string,
|
||||
) (*meowlib.UserMessage, error)
|
||||
```
|
||||
|
||||
Implementation:
|
||||
1. Serialise `DeviceSyncPayload{SyncType, PeerUid, DbMessage, DedupId}` with `proto.Marshal`.
|
||||
2. Create a `UserMessage` with `Type = "device_sync"`, `Destination = devicePeer.ContactLookupKey`, `Appdata = serialisedPayload`.
|
||||
3. Set `Status.Uuid = dedupId`.
|
||||
|
||||
#### 2.2 Dispatch sync to all sibling devices
|
||||
|
||||
```go
|
||||
// DispatchSyncToDevices sends a DeviceSyncPayload to every device peer whose
|
||||
// pull server list overlaps with the available servers.
|
||||
// It enqueues a SendJob per device, reusing the existing bgSendHelper queue.
|
||||
func DispatchSyncToDevices(
|
||||
storagePath string,
|
||||
syncType string,
|
||||
peerUid string,
|
||||
dbm *meowlib.DbMessage,
|
||||
dedupId string,
|
||||
) error
|
||||
```
|
||||
|
||||
Iterates `identity.OwnedDevices`, builds and queues one `SendJob` per device (just like `CreateUserMessageAndSendJob` but using device peer keys and putting the message in `outbox/` with a recognisable prefix, e.g. `dev_{devPeerUid}_{dedupId}`).
|
||||
|
||||
After calling `peer.ProcessOutboundUserMessage` for each device peer, persist the updated DR state: `identity.DeviceStorage.StorePeer(devPeer)` if `devPeer.DrRootKey != ""`.
|
||||
|
||||
The message is packed into `ToServerMessage.Messages` (same field as regular chat). No server changes needed.
|
||||
|
||||
---
|
||||
|
||||
### Phase 3 — Integrate Dispatch into Send/Receive Paths
|
||||
|
||||
**Files to touch:** `client/helpers/messageHelper.go`, `client/helpers/bgPollHelper.go`
|
||||
|
||||
#### 3.1 After outbound message stored (`CreateAndStoreUserMessage`)
|
||||
|
||||
At the end of `CreateAndStoreUserMessage` (after `peer.StoreMessage`), add:
|
||||
|
||||
```go
|
||||
// Async: do not block the caller
|
||||
go DispatchSyncToDevices(storagePath, "msg", peerUid, dbm, usermessage.Status.Uuid)
|
||||
```
|
||||
|
||||
The `dbm` is obtained from `UserMessageToDbMessage(true, usermessage, nil)` (files are excluded from sync — they stay on the originating device or are re-requested).
|
||||
|
||||
#### 3.2 After inbound message stored (`ConsumeInboxFile`)
|
||||
|
||||
After `peer.StoreMessage(usermsg, filenames)` succeeds:
|
||||
|
||||
```go
|
||||
dbm := client.UserMessageToDbMessage(false, usermsg, nil)
|
||||
go DispatchSyncToDevices(storagePath, "msg", peer.Uid, dbm, usermsg.Status.Uuid)
|
||||
```
|
||||
|
||||
#### 3.3 After ACK status update (`ReadAckMessageResponse` — currently a stub)
|
||||
|
||||
When status timestamps (received/processed) are updated in the DB, dispatch a `"status"` sync with the updated `DbMessage`.
|
||||
|
||||
---
|
||||
|
||||
### Phase 4 — Receive & Consume Device Sync Messages
|
||||
|
||||
**Files to touch:** `client/helpers/bgPollHelper.go`, new `client/helpers/deviceSyncHelper.go`
|
||||
|
||||
#### 4.1 Extend `GetRequestJobs()` to include device lookup keys
|
||||
|
||||
In `identity.go:GetRequestJobs()`, after the loop over `Peers`, add a similar loop over `OwnedDevices`:
|
||||
|
||||
```go
|
||||
for _, devPeer := range id.OwnedDevices {
|
||||
for _, server := range devPeer.MyPullServers {
|
||||
if job, ok := srvs[server]; ok {
|
||||
job.LookupKeys = append(job.LookupKeys, devPeer.MyLookupKp)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Device messages will now arrive inside `from_server.Chat` alongside regular peer messages. The next step distinguishes them.
|
||||
|
||||
#### 4.2 Distinguish device vs peer messages in `ConsumeInboxFile`
|
||||
|
||||
After `identity.Peers.GetFromMyLookupKey(packedUserMessage.Destination)` returns `nil`, try:
|
||||
|
||||
```go
|
||||
devPeer := identity.OwnedDevices.GetFromMyLookupKey(packedUserMessage.Destination)
|
||||
if devPeer != nil {
|
||||
err := ConsumeDeviceSyncMessage(devPeer, packedUserMessage)
|
||||
// continue to next message
|
||||
continue
|
||||
}
|
||||
// original error path
|
||||
```
|
||||
|
||||
#### 4.3 `ConsumeDeviceSyncMessage` (new file `client/helpers/deviceSyncHelper.go`)
|
||||
|
||||
```go
|
||||
func ConsumeDeviceSyncMessage(
|
||||
devPeer *client.Peer,
|
||||
packed *meowlib.PackedUserMessage,
|
||||
) error
|
||||
```
|
||||
|
||||
Steps:
|
||||
1. Decrypt with `devPeer.ProcessInboundUserMessage(packed)` (takes the full `*PackedUserMessage` — **not** `payload, signature` separately; that API was updated when the sym-encryption + double-ratchet layer was added).
|
||||
2. Check `usermsg.Type == "device_sync"`.
|
||||
3. Deserialise `DeviceSyncPayload` from `usermsg.Appdata`.
|
||||
4. Dedup check: call `IsDeviceSyncSeen(payload.DedupId)`. If yes, skip.
|
||||
5. Mark seen: `MarkDeviceSyncSeen(payload.DedupId)`.
|
||||
6. **Persist DR state** — after decryption, if `devPeer.DrRootKey != ""`, call `identity.OwnedDevices.StorePeer(devPeer)` (or the equivalent Badger-backed store) to persist the updated `DrStateJson`. This mirrors what `ConsumeInboxFile` does for regular peers.
|
||||
7. Dispatch by `payload.SyncType`:
|
||||
- `"msg"`: find the local peer by `payload.PeerUid`, call `client.StoreDeviceSyncedMessage(peer, payload.DbMessage)`.
|
||||
- `"status"`: update the status fields in the existing DB row matched by `payload.DbMessage.Status.Uuid`.
|
||||
- `"peer_update"`: apply `payload.PeerData` to the local peer record (see Phase 6).
|
||||
- `"identity_update"`: apply `payload.IdentityData` to the local identity profile (see Phase 6).
|
||||
|
||||
#### 4.4 `StoreDeviceSyncedMessage` in `client/messagestorage.go`
|
||||
|
||||
A thin wrapper around `storeMessage` that:
|
||||
- Marks the message as synced (a new bool field `Synced` in `DbMessage`, or use a naming convention in `DbMessage.Appdata`).
|
||||
- Does **not** trigger a second round of sync dispatch (no re-broadcast).
|
||||
- Handles absent file paths gracefully (files are not synced, only metadata).
|
||||
|
||||
---
|
||||
|
||||
### Phase 6 — Peer Metadata and Identity Profile Sync
|
||||
|
||||
**Files to touch:** `client/helpers/deviceHelper.go`, `client/helpers/deviceSyncHelper.go`, `client/identity.go`
|
||||
|
||||
The goal is to propagate non-message data across sibling devices: peer names/avatars/settings and the identity profile. This is **one-directional fan-out** (whichever device makes the change dispatches to all siblings) — no merge protocol is needed because conflicts are resolved by last-write-wins (the dedupId carries a timestamp or UUID sufficient for dedup; ordering is not guaranteed but is acceptable for profile data).
|
||||
|
||||
#### 6.1 Peer metadata sync (`sync_type = "peer_update"`)
|
||||
|
||||
Dispatch a `"peer_update"` payload whenever a peer record is meaningfully mutated (name, avatar, notification settings, visibility, blocked state, etc.).
|
||||
|
||||
**Payload**: `DeviceSyncPayload.PeerData` is a JSON-encoded **full `Peer` struct**, including all private key material and DR state. This is safe because:
|
||||
- The device sync channel is E2E-encrypted with the same X25519 + sym + DR stack as peer messages.
|
||||
- The target server is user-owned; the operator is the user themselves.
|
||||
- The recipient is the same person on a different device.
|
||||
|
||||
Fields included in `PeerData`:
|
||||
- All keypairs in full: `MyIdentity`, `MyEncryptionKp`, `MyLookupKp` (private + public)
|
||||
- `MySymKey` — shared symmetric key for that peer's channel
|
||||
- `DrKpPrivate`, `DrKpPublic`, `DrRootKey`, `DrInitiator`, `ContactDrPublicKey`
|
||||
- **`DrStateJson`** — current live DR session state (see DR note below)
|
||||
- All contact keys: `ContactPublicKey`, `ContactEncryption`, `ContactLookupKey`, `ContactPullServers`
|
||||
- All metadata: `Name`, `Avatar`, `Avatars`, `MyName`, `Visible`, `Blocked`, `MessageNotification`, `SendDeliveryAck`, `SendProcessingAck`, `CallsAllowed`, server lists, etc.
|
||||
|
||||
Fields excluded from `PeerData`:
|
||||
- `dbPassword` — transient in-memory field, never serialised; the receiving device uses its own memory password.
|
||||
|
||||
The receiving device upserts the peer into its local `Peers` store. After applying the sync, the sibling device is a full participant in the conversation: it can send and receive messages using the replicated keypairs, has the same DR session state, and monitors the same lookup key queues.
|
||||
|
||||
**DR state sync (Phase 6 only)**: Syncing `DrStateJson` as part of `"peer_update"` gives sibling devices a working DR session at the point of pairing and keeps them in sync during normal single-active-device use. Phase 7 supersedes this with independent per-device DR sessions, eliminating all shared-state concerns. If Phase 7 is implemented, the `DrStateJson` field in `PeerData` can be omitted from the sync payload (each device initialises its own fresh session via the device introduction flow).
|
||||
|
||||
**New peers**: When Device A completes an invitation with a new contact, it dispatches `"peer_update"` to all siblings with the full peer record. Device B immediately becomes a full participant — same keypairs, same lookup key, same DR session start state — and can transparently send and receive messages with that contact without any secondary invitation.
|
||||
|
||||
#### 6.2 Identity profile sync (`sync_type = "identity_update"`)
|
||||
|
||||
Dispatch an `"identity_update"` whenever `Identity.Nickname`, `Identity.DefaultAvatar`, `Identity.Avatars`, or `Identity.Status` changes.
|
||||
|
||||
**Payload**: `DeviceSyncPayload.IdentityData` is a JSON-encoded subset of `Identity`:
|
||||
```go
|
||||
type IdentityProfileSnapshot struct {
|
||||
Nickname string `json:"nickname"`
|
||||
DefaultAvatar string `json:"default_avatar"`
|
||||
Avatars []Avatar `json:"avatars"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
```
|
||||
|
||||
The receiving device deserialises this and updates only the listed fields on its local `Identity`, then calls `identity.Save()`.
|
||||
|
||||
**Explicitly NOT synced** in `IdentityData`:
|
||||
- `RootKp` — the user's root signing keypair is the trust anchor; it should be established once per identity creation and never transmitted, even over a secure channel. Compromise of the root key invalidates the entire identity.
|
||||
- `Device` — device-specific keypair for server auth; each device has its own.
|
||||
- `OwnedDevices` — the device mesh itself; managed separately by the pairing flow.
|
||||
- `HiddenPeers` — sensitive by design; out of scope.
|
||||
- `DefaultDbPassword`, `DbPasswordStore` — local security preferences.
|
||||
- `MessageServers` / `Peers` — covered by their own sync types (`"server_add"`, `"peer_update"`).
|
||||
|
||||
#### 6.3 Server list sync (future — `sync_type = "server_add"`)
|
||||
|
||||
When a new `MessageServer` is added to one device's `MessageServers`, dispatch `"server_add"` so all siblings discover it. Implementation deferred; placeholder `sync_type` reserved.
|
||||
|
||||
#### 6.4 Dispatch hooks
|
||||
|
||||
- After `Identity.InvitePeer` / `FinalizeInvitation` / any peer metadata update: call `DispatchSyncToDevices(..., "peer_update", peer.Uid, nil, uuid.New().String())`.
|
||||
- After `Identity.Save()` when profile fields changed: call `DispatchSyncToDevices(..., "identity_update", "", nil, uuid.New().String())`.
|
||||
|
||||
---
|
||||
|
||||
### Phase 7 — Per-Device DR Sessions (Bullet-proof Forward Secrecy)
|
||||
|
||||
**Goal**: Eliminate the concurrent-send DR race without shared ratchet state and without leaking device count to contacts.
|
||||
|
||||
#### 7.0 Privacy constraint
|
||||
|
||||
The naive per-device DR approach (introduce all devices to all contacts) has a fundamental privacy problem: every contact learns how many devices you own and receives session material for each. This leaks metadata — device count, device rotation events, possibly device fingerprints. This is unacceptable for a privacy-first library.
|
||||
|
||||
Two architecturally sound options are described below. **Option B (primary device relay) is recommended** because it preserves complete contact-side opacity and requires no protocol extension on the contact side.
|
||||
|
||||
---
|
||||
|
||||
#### Option A — Contact-aware per-device sessions (not recommended)
|
||||
|
||||
Each device is introduced to all contacts via a `"device_introduce"` message. The contact maintains one independent DR session per device and sends a separate encrypted copy per device on every message.
|
||||
|
||||
| Property | Value |
|
||||
|---|---|
|
||||
| DR race | ❌ Eliminated |
|
||||
| Contact privacy | ❌ Contacts learn device count and session keys |
|
||||
| Contact protocol change | ✅ Required (handle `DeviceInfo` list, multi-destination send) |
|
||||
| Backward compatibility | ❌ Old clients can't participate |
|
||||
| Server changes | ✅ None |
|
||||
|
||||
This is Signal's model. It is appropriate when contacts are expected to be aware of device multiplicity (e.g. a closed ecosystem). It is **not** appropriate for meowlib's open, privacy-first design.
|
||||
|
||||
---
|
||||
|
||||
#### Option B — Primary device relay (recommended)
|
||||
|
||||
The device that owns the peer relationship (the one whose keypairs are in the `Peer` record — call it the **primary**) is the only device that ever communicates directly with a contact. Its DR session with the contact is singular, unshared, and advances normally.
|
||||
|
||||
Sibling devices that want to send a message do so by dispatching a `"forward"` device sync payload to the primary. The primary re-encrypts with the contact's keys and forwards. From the contact's perspective: one sender, one DR session, zero device awareness.
|
||||
|
||||
| Property | Value |
|
||||
|---|---|
|
||||
| DR race | ❌ Eliminated (only primary drives the DR session) |
|
||||
| Contact privacy | ✅ Contact is completely unaware of sibling devices |
|
||||
| Contact protocol change | ✅ None required |
|
||||
| Backward compatibility | ✅ Full |
|
||||
| Server changes | ✅ None |
|
||||
| Trade-off | If primary is offline, sibling outbound messages queue until it returns |
|
||||
|
||||
##### 7.1 Primary device designation
|
||||
|
||||
The device that completes the invitation flow for a peer (calls `InvitePeer` or `FinalizeInvitation`) is the primary for that peer. The `Peer` record synced to sibling devices carries a `PrimaryDeviceUid string` field (the UID of the device peer that "owns" this peer relationship):
|
||||
|
||||
```go
|
||||
// Add to Peer struct:
|
||||
PrimaryDeviceUid string `json:"primary_device_uid,omitempty"`
|
||||
// empty = this device IS the primary for this peer
|
||||
```
|
||||
|
||||
When a sibling device receives a `"peer_update"` sync, it sets `PrimaryDeviceUid` to the sender's device UID. When the primary device sends a peer update, it leaves `PrimaryDeviceUid` empty (it is the primary).
|
||||
|
||||
##### 7.2 New sync type: `"forward"`
|
||||
|
||||
Add to `DeviceSyncPayload.sync_type`:
|
||||
|
||||
```
|
||||
"forward" — sibling device requests primary to send a message to a peer on its behalf
|
||||
```
|
||||
|
||||
New fields needed in `DeviceSyncPayload`:
|
||||
|
||||
```protobuf
|
||||
bytes forward_payload = 7; // serialized UserMessage (plaintext, will be encrypted by primary)
|
||||
string forward_peer_uid = 8; // local peer UID on the primary device to forward to
|
||||
```
|
||||
|
||||
##### 7.3 Send path on a sibling device
|
||||
|
||||
When a sibling device (one where `peer.PrimaryDeviceUid != ""`) sends a message to peer P:
|
||||
|
||||
1. Build the `UserMessage` normally.
|
||||
2. **Do not** call `peer.ProcessOutboundUserMessage` — the sibling does not have a valid DR state for the contact.
|
||||
3. Serialize the `UserMessage` (plaintext proto bytes).
|
||||
4. Build a `DeviceSyncPayload{SyncType: "forward", ForwardPayload: serialized, ForwardPeerUid: peer.Uid}`.
|
||||
5. Dispatch it to the primary device via the normal device sync send path.
|
||||
6. Store the message locally with a `"pending_forward"` status so the UI reflects it immediately.
|
||||
|
||||
##### 7.4 Receive and forward path on the primary device
|
||||
|
||||
When `ConsumeDeviceSyncMessage` on the primary sees `sync_type == "forward"`:
|
||||
|
||||
1. Deserialize `ForwardPayload` into a `UserMessage`.
|
||||
2. Locate the local peer by `ForwardPeerUid`.
|
||||
3. Call `peer.ProcessOutboundUserMessage(userMessage)` — primary uses its DR session normally.
|
||||
4. Enqueue a `SendJob` to deliver to the contact's server (same path as any outbound message).
|
||||
5. Dispatch a `"msg"` sync back to all siblings with the now-stored `DbMessage` so they update the message status from `"pending_forward"` to sent.
|
||||
|
||||
##### 7.5 Offline queuing
|
||||
|
||||
If the primary device is offline when the sibling dispatches a `"forward"` sync, the sync message sits in the device sync queue on the server (same Redis sorted-set as all device messages). When the primary comes back online and polls, it picks up the forwarded message and delivers it. No message is lost; latency equals the primary's offline window.
|
||||
|
||||
##### 7.6 Result
|
||||
|
||||
- Zero contact protocol changes. Contacts cannot distinguish a primary-only device from a multi-device user.
|
||||
- No device count leakage. Device topology is fully opaque to the outside world.
|
||||
- No DR race. The primary drives a single ratchet per contact.
|
||||
- No server changes.
|
||||
- `ProcessOutboundUserMessage` signature stays `(*PackedUserMessage, error)` — no ripple through callers.
|
||||
- Trade-off is well-bounded: forward latency ≤ primary polling interval, which is already the existing long-poll timeout.
|
||||
|
||||
---
|
||||
|
||||
### Phase 5 — Dedup Store
|
||||
|
||||
**Files to touch:** new `client/devicesyncdedup.go`
|
||||
|
||||
A single SQLite DB per identity folder: `{StoragePath}/{IdentityUuid}/devicesync.db`.
|
||||
|
||||
Schema:
|
||||
```sql
|
||||
CREATE TABLE IF NOT EXISTS seen (
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
seen_at INTEGER NOT NULL
|
||||
);
|
||||
```
|
||||
|
||||
Functions:
|
||||
```go
|
||||
func IsDeviceSyncSeen(storagePath, identityUuid, dedupId string) (bool, error)
|
||||
func MarkDeviceSyncSeen(storagePath, identityUuid, dedupId string) error
|
||||
func PruneDeviceSyncSeen(storagePath, identityUuid string, olderThan time.Duration) error
|
||||
```
|
||||
|
||||
`PruneDeviceSyncSeen` is called periodically (e.g. weekly) from the background thread to remove entries older than 30 days.
|
||||
|
||||
---
|
||||
|
||||
## File Change Summary
|
||||
|
||||
| File | Change |
|
||||
|---|---|
|
||||
| `pb/messages.proto` | Add `DeviceSyncPayload` message (with `peer_data` and `identity_data` fields) |
|
||||
| `pb/protogen.sh` → re-run | Regenerate `.pb.go` |
|
||||
| `client/identity.go` | Add `InitDevicePairing`, `AnswerDevicePairing`, `FinalizeDevicePairing`; add `DeviceStorage PeerStorage` field; extend `GetRequestJobs()`; add profile-change dispatch hooks |
|
||||
| `client/peer.go` | No changes needed (Type field already exists) |
|
||||
| `client/messagestorage.go` | Add `StoreDeviceSyncedMessage` |
|
||||
| `client/devicesyncdedup.go` | **New** — dedup SQLite helpers |
|
||||
| `client/helpers/deviceHelper.go` | **New** — `BuildDeviceSyncMessage`, `DispatchSyncToDevices` (msg + peer_update + identity_update), pairing message helpers |
|
||||
| `client/helpers/deviceSyncHelper.go` | **New** — `ConsumeDeviceSyncMessage` (handles all sync types) |
|
||||
| `client/helpers/messageHelper.go` | Add `DispatchSyncToDevices` call after outbound store; detect primary vs sibling role on send |
|
||||
| `client/helpers/bgPollHelper.go` | Add device message detection in `ConsumeInboxFile` |
|
||||
| `client/peer.go` | Add `PrimaryDeviceUid string` field; sibling send path dispatches `"forward"` instead of direct send |
|
||||
| `client/helpers/deviceSyncHelper.go` | Handle `"forward"` sync type: deserialize, re-encrypt, enqueue SendJob, dispatch `"msg"` sync back |
|
||||
|
||||
Server package: **no changes required**.
|
||||
|
||||
---
|
||||
|
||||
## Sync Scope
|
||||
|
||||
| Data | Synced | Notes |
|
||||
|---|---|---|
|
||||
| Message text / data | ✅ | In `DbMessage.Data` |
|
||||
| Outbound flag | ✅ | In `DbMessage.Outbound` |
|
||||
| Message UUID | ✅ | Via `ConversationStatus.Uuid` |
|
||||
| Sent/received timestamps | ✅ | In `ConversationStatus` |
|
||||
| File content | ❌ | Not synced; only `FilePaths` metadata synced |
|
||||
| Peer full keypairs (private + public) | ✅ | Phase 6 — included in `"peer_update"` `PeerData`; channel is E2E-encrypted on user-owned server |
|
||||
| Peer symmetric key | ✅ | Phase 6 — included in `"peer_update"` `PeerData` |
|
||||
| Peer DR session state (`DrStateJson`) | ✅ | Phase 6 — synced on peer_update; Phase 7 (Option B) eliminates the need: primary drives one DR session, siblings never touch it |
|
||||
| Peer metadata (name, avatar, settings) | ✅ | Phase 6 — `"peer_update"` sync type |
|
||||
| New peer (unknown to sibling) | ✅ | Full peer record synced; sibling becomes immediate full participant |
|
||||
| Identity profile (nickname, avatar, status) | ✅ | Phase 6 — `"identity_update"` sync type |
|
||||
| Identity root keypair (`RootKp`) | ❌ | Trust anchor; never transmitted even over secure channel |
|
||||
| Known/message server list | ⚠️ | Future — `"server_add"` placeholder reserved |
|
||||
| Hidden peers | ❌ | Hidden by design; out of scope |
|
||||
| Device keypair | ❌ | Per-device; each device authenticates to servers with its own key |
|
||||
|
||||
---
|
||||
|
||||
## Privacy Properties
|
||||
|
||||
- Device sync messages are end-to-end encrypted (same X25519 + sym + DR stack as peer messages).
|
||||
- The server sees only the device lookup key as destination; it cannot distinguish sync messages from peer messages.
|
||||
- Including device lookup keys in batch pull requests does not leak which device belongs to you (same privacy model as multiple peer lookup keys per request).
|
||||
- `OwnedDevices` peers should be treated as "hidden" (not shown in contact lists) and stored in the device storage, separate from regular peers.
|
||||
- **Contacts are never made aware of device count or device identity** (Phase 7 Option B). The primary device relay model means the outside world observes exactly one sender per user identity, regardless of how many devices are active.
|
||||
- The device mesh topology (which devices exist, how many) is known only to the user's own devices, and is carried exclusively over the E2E-encrypted device sync channel on the user-owned server.
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
1. **Unit tests** for `DeviceSyncPayload` serialisation round-trip.
|
||||
2. **Unit tests** for dedup store (seen/mark/prune lifecycle).
|
||||
3. **Integration test** extending `TestEndToEnd`:
|
||||
- Create identity, two device peers (DeviceA, DeviceB).
|
||||
- Send a message on DeviceA.
|
||||
- Verify DeviceB's DB contains the synced message after `ConsumeDeviceSyncMessage`.
|
||||
- Resend the same dedup_id — verify that no duplicate row is created.
|
||||
4. **Integration test** for inbound sync:
|
||||
- DeviceA receives a peer message.
|
||||
- Verify DeviceB gets the sync and stores it correctly.
|
||||
1
go.mod
1
go.mod
@@ -40,6 +40,7 @@ require (
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/onsi/gomega v1.30.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/status-im/doubleratchet v3.0.0+incompatible // indirect
|
||||
github.com/twitchtv/twirp v8.1.3+incompatible // indirect
|
||||
github.com/yuin/gopher-lua v1.1.1 // indirect
|
||||
golang.org/x/crypto v0.41.0 // indirect
|
||||
|
||||
2
go.sum
2
go.sum
@@ -213,6 +213,8 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/status-im/doubleratchet v3.0.0+incompatible h1:aJ1ejcSERpSzmWZBgtfYtiU2nF0Q8ZkGyuEPYETXkCY=
|
||||
github.com/status-im/doubleratchet v3.0.0+incompatible/go.mod h1:1sqR0+yhiM/bd+wrdX79AOt2csZuJOni0nUDzKNuqOU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
|
||||
363
messages.pb.go
363
messages.pb.go
@@ -668,10 +668,10 @@ func (x *FromServerMessage) GetContactCard() []*ContactCard {
|
||||
|
||||
type MatriochkaServer struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` // Server Url
|
||||
PublicKey string `protobuf:"bytes,2,opt,name=publicKey,proto3" json:"publicKey,omitempty"` // Server Public Key
|
||||
Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` // Optional, uuid for delivery confirmation
|
||||
Delay int32 `protobuf:"varint,4,opt,name=delay,proto3" json:"delay,omitempty"` // Max delay requested for message forwarding or delivery tracking
|
||||
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` // Server Url
|
||||
PublicKey string `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` // Server Public Key
|
||||
Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` // Optional, uuid for delivery confirmation
|
||||
Delay int32 `protobuf:"varint,4,opt,name=delay,proto3" json:"delay,omitempty"` // Max delay requested for message forwarding or delivery tracking
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -736,10 +736,10 @@ func (x *MatriochkaServer) GetDelay() int32 {
|
||||
|
||||
type Matriochka struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
LookupKey string `protobuf:"bytes,1,opt,name=lookupKey,proto3" json:"lookupKey,omitempty"` // Optional, only if you want delivery tracking, less stealth
|
||||
Prev *MatriochkaServer `protobuf:"bytes,2,opt,name=prev,proto3" json:"prev,omitempty"` // Optional, like above
|
||||
Next *MatriochkaServer `protobuf:"bytes,3,opt,name=next,proto3" json:"next,omitempty"` // Next server to deliver the message to
|
||||
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // Matriochka data
|
||||
LookupKey string `protobuf:"bytes,1,opt,name=lookup_key,json=lookupKey,proto3" json:"lookup_key,omitempty"` // Optional, only if you want delivery tracking, less stealth
|
||||
Prev *MatriochkaServer `protobuf:"bytes,2,opt,name=prev,proto3" json:"prev,omitempty"` // Optional, like above
|
||||
Next *MatriochkaServer `protobuf:"bytes,3,opt,name=next,proto3" json:"next,omitempty"` // Next server to deliver the message to
|
||||
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // Matriochka data
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -902,10 +902,13 @@ type ContactCard struct {
|
||||
ContactPublicKey string `protobuf:"bytes,2,opt,name=contact_public_key,json=contactPublicKey,proto3" json:"contact_public_key,omitempty"` // contact public key, will be used to authenticate her/his messages
|
||||
EncryptionPublicKey string `protobuf:"bytes,3,opt,name=encryption_public_key,json=encryptionPublicKey,proto3" json:"encryption_public_key,omitempty"` // public key you must use to to write encrypted messages to that contact
|
||||
LookupPublicKey string `protobuf:"bytes,4,opt,name=lookup_public_key,json=lookupPublicKey,proto3" json:"lookup_public_key,omitempty"` // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers
|
||||
PullServers []*ServerCard `protobuf:"bytes,5,rep,name=pull_servers,json=pullServers,proto3" json:"pull_servers,omitempty"` // list the servers where the contact will look for messages from you
|
||||
Version uint32 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"`
|
||||
InvitationId string `protobuf:"bytes,7,opt,name=invitation_id,json=invitationId,proto3" json:"invitation_id,omitempty"`
|
||||
InvitationMessage string `protobuf:"bytes,8,opt,name=invitation_message,json=invitationMessage,proto3" json:"invitation_message,omitempty"`
|
||||
SymetricKey string `protobuf:"bytes,5,opt,name=symetric_key,json=symetricKey,proto3" json:"symetric_key,omitempty"` // agreed key for payload symetric encryption
|
||||
PullServers []*ServerCard `protobuf:"bytes,6,rep,name=pull_servers,json=pullServers,proto3" json:"pull_servers,omitempty"` // list the servers where the contact will look for messages from you
|
||||
Version uint32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"`
|
||||
InvitationId string `protobuf:"bytes,8,opt,name=invitation_id,json=invitationId,proto3" json:"invitation_id,omitempty"`
|
||||
InvitationMessage string `protobuf:"bytes,9,opt,name=invitation_message,json=invitationMessage,proto3" json:"invitation_message,omitempty"`
|
||||
DrRootKey string `protobuf:"bytes,10,opt,name=dr_root_key,json=drRootKey,proto3" json:"dr_root_key,omitempty"` // DR pre-shared root key (base64, 32 bytes)
|
||||
DrPublicKey string `protobuf:"bytes,11,opt,name=dr_public_key,json=drPublicKey,proto3" json:"dr_public_key,omitempty"` // DR DH public key of the initiator (base64)
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -968,6 +971,13 @@ func (x *ContactCard) GetLookupPublicKey() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ContactCard) GetSymetricKey() string {
|
||||
if x != nil {
|
||||
return x.SymetricKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ContactCard) GetPullServers() []*ServerCard {
|
||||
if x != nil {
|
||||
return x.PullServers
|
||||
@@ -996,14 +1006,29 @@ func (x *ContactCard) GetInvitationMessage() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ContactCard) GetDrRootKey() string {
|
||||
if x != nil {
|
||||
return x.DrRootKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ContactCard) GetDrPublicKey() string {
|
||||
if x != nil {
|
||||
return x.DrPublicKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// structure for sending a message to be forwarded to another user in protobuf format
|
||||
type PackedUserMessage struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` // the peer's current conversation lookup public key
|
||||
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` // the message UserMessage encrypted with the destination peer's public key
|
||||
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` // the payload signature with the client identity private key
|
||||
ServerTimestamp []int64 `protobuf:"varint,4,rep,packed,name=serverTimestamp,proto3" json:"serverTimestamp,omitempty"` // server time stamp, might be several in matriochka mode
|
||||
ServerTimestamp []int64 `protobuf:"varint,4,rep,packed,name=server_timestamp,json=serverTimestamp,proto3" json:"server_timestamp,omitempty"` // server time stamp, might be several in matriochka mode
|
||||
ServerDeliveryUuid string `protobuf:"bytes,5,opt,name=server_delivery_uuid,json=serverDeliveryUuid,proto3" json:"server_delivery_uuid,omitempty"` // message uuid, for server delivery tracking, omitted if not delivery tracking desired
|
||||
DrHeader []byte `protobuf:"bytes,6,opt,name=dr_header,json=drHeader,proto3" json:"dr_header,omitempty"` // serialized doubleratchet MessageHeader; empty = no DR layer
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -1073,16 +1098,24 @@ func (x *PackedUserMessage) GetServerDeliveryUuid() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PackedUserMessage) GetDrHeader() []byte {
|
||||
if x != nil {
|
||||
return x.DrHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ConversationStatus struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
|
||||
AnswerToUuid string `protobuf:"bytes,2,opt,name=answer_to_uuid,json=answerToUuid,proto3" json:"answer_to_uuid,omitempty"` // message is an answer to another one, specify uuid here
|
||||
LocalSequence uint64 `protobuf:"varint,3,opt,name=localSequence,proto3" json:"localSequence,omitempty"` // seq number in local conversation for custom reordering
|
||||
Sent uint64 `protobuf:"varint,4,opt,name=sent,proto3" json:"sent,omitempty"` // timestamp of the message sent
|
||||
Received uint64 `protobuf:"varint,5,opt,name=received,proto3" json:"received,omitempty"` // timestamp of the message received
|
||||
Processed uint64 `protobuf:"varint,6,opt,name=processed,proto3" json:"processed,omitempty"` // timestamp of the message processed
|
||||
MyNextIdentity *ContactCard `protobuf:"bytes,7,opt,name=my_next_identity,json=myNextIdentity,proto3" json:"my_next_identity,omitempty"`
|
||||
PeerNextIdentityAck int32 `protobuf:"varint,8,opt,name=peer_next_identityAck,json=peerNextIdentityAck,proto3" json:"peer_next_identityAck,omitempty"` // version of the new peer accepted id
|
||||
Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` // uuid of message, or uuid of related message if uuid_action is not empty
|
||||
Reactions []*Reaction `protobuf:"bytes,2,rep,name=reactions,proto3" json:"reactions,omitempty"` // empty => normal message, 1: receivedack, 2: processedack, 3:reaction
|
||||
ReplyToUuid string `protobuf:"bytes,3,opt,name=reply_to_uuid,json=replyToUuid,proto3" json:"reply_to_uuid,omitempty"` // this message replies to the specified uuid
|
||||
LocalSequence uint64 `protobuf:"varint,4,opt,name=local_sequence,json=localSequence,proto3" json:"local_sequence,omitempty"` // seq number in local conversation for custom reordering
|
||||
Sent uint64 `protobuf:"varint,5,opt,name=sent,proto3" json:"sent,omitempty"` // timestamp of the message sent
|
||||
Received uint64 `protobuf:"varint,6,opt,name=received,proto3" json:"received,omitempty"` // timestamp of the message received
|
||||
Processed uint64 `protobuf:"varint,7,opt,name=processed,proto3" json:"processed,omitempty"` // timestamp of the message processed
|
||||
MyNextIdentity *ContactCard `protobuf:"bytes,8,opt,name=my_next_identity,json=myNextIdentity,proto3" json:"my_next_identity,omitempty"`
|
||||
PeerNextIdentityAck int32 `protobuf:"varint,9,opt,name=peer_next_identity_ack,json=peerNextIdentityAck,proto3" json:"peer_next_identity_ack,omitempty"` // version of the new peer accepted id
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -1124,9 +1157,16 @@ func (x *ConversationStatus) GetUuid() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ConversationStatus) GetAnswerToUuid() string {
|
||||
func (x *ConversationStatus) GetReactions() []*Reaction {
|
||||
if x != nil {
|
||||
return x.AnswerToUuid
|
||||
return x.Reactions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ConversationStatus) GetReplyToUuid() string {
|
||||
if x != nil {
|
||||
return x.ReplyToUuid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -1173,6 +1213,58 @@ func (x *ConversationStatus) GetPeerNextIdentityAck() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type Reaction struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Reaction string `protobuf:"bytes,1,opt,name=reaction,proto3" json:"reaction,omitempty"`
|
||||
ContactUuid string `protobuf:"bytes,2,opt,name=contact_uuid,json=contactUuid,proto3" json:"contact_uuid,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Reaction) Reset() {
|
||||
*x = Reaction{}
|
||||
mi := &file_messages_proto_msgTypes[13]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Reaction) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Reaction) ProtoMessage() {}
|
||||
|
||||
func (x *Reaction) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[13]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Reaction.ProtoReflect.Descriptor instead.
|
||||
func (*Reaction) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{13}
|
||||
}
|
||||
|
||||
func (x *Reaction) GetReaction() string {
|
||||
if x != nil {
|
||||
return x.Reaction
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Reaction) GetContactUuid() string {
|
||||
if x != nil {
|
||||
return x.ContactUuid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
@@ -1183,7 +1275,7 @@ type Group struct {
|
||||
|
||||
func (x *Group) Reset() {
|
||||
*x = Group{}
|
||||
mi := &file_messages_proto_msgTypes[13]
|
||||
mi := &file_messages_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1195,7 +1287,7 @@ func (x *Group) String() string {
|
||||
func (*Group) ProtoMessage() {}
|
||||
|
||||
func (x *Group) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[13]
|
||||
mi := &file_messages_proto_msgTypes[14]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1208,7 +1300,7 @@ func (x *Group) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use Group.ProtoReflect.Descriptor instead.
|
||||
func (*Group) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{13}
|
||||
return file_messages_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *Group) GetName() string {
|
||||
@@ -1234,7 +1326,7 @@ type UserMessage struct {
|
||||
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
|
||||
Status *ConversationStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"`
|
||||
Contact *ContactCard `protobuf:"bytes,6,opt,name=contact,proto3" json:"contact,omitempty"`
|
||||
KnownServers *ServerCard `protobuf:"bytes,7,opt,name=knownServers,proto3" json:"knownServers,omitempty"`
|
||||
KnownServers *ServerCard `protobuf:"bytes,7,opt,name=known_servers,json=knownServers,proto3" json:"known_servers,omitempty"`
|
||||
Group *Group `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"`
|
||||
Files []*File `protobuf:"bytes,9,rep,name=files,proto3" json:"files,omitempty"`
|
||||
CurrentLocation *Location `protobuf:"bytes,10,opt,name=current_location,json=currentLocation,proto3" json:"current_location,omitempty"`
|
||||
@@ -1247,7 +1339,7 @@ type UserMessage struct {
|
||||
|
||||
func (x *UserMessage) Reset() {
|
||||
*x = UserMessage{}
|
||||
mi := &file_messages_proto_msgTypes[14]
|
||||
mi := &file_messages_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1259,7 +1351,7 @@ func (x *UserMessage) String() string {
|
||||
func (*UserMessage) ProtoMessage() {}
|
||||
|
||||
func (x *UserMessage) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[14]
|
||||
mi := &file_messages_proto_msgTypes[15]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1272,7 +1364,7 @@ func (x *UserMessage) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use UserMessage.ProtoReflect.Descriptor instead.
|
||||
func (*UserMessage) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{14}
|
||||
return file_messages_proto_rawDescGZIP(), []int{15}
|
||||
}
|
||||
|
||||
func (x *UserMessage) GetDestination() string {
|
||||
@@ -1378,7 +1470,7 @@ type File struct {
|
||||
|
||||
func (x *File) Reset() {
|
||||
*x = File{}
|
||||
mi := &file_messages_proto_msgTypes[15]
|
||||
mi := &file_messages_proto_msgTypes[16]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1390,7 +1482,7 @@ func (x *File) String() string {
|
||||
func (*File) ProtoMessage() {}
|
||||
|
||||
func (x *File) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[15]
|
||||
mi := &file_messages_proto_msgTypes[16]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1403,7 +1495,7 @@ func (x *File) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use File.ProtoReflect.Descriptor instead.
|
||||
func (*File) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{15}
|
||||
return file_messages_proto_rawDescGZIP(), []int{16}
|
||||
}
|
||||
|
||||
func (x *File) GetFilename() string {
|
||||
@@ -1446,7 +1538,7 @@ type Location struct {
|
||||
|
||||
func (x *Location) Reset() {
|
||||
*x = Location{}
|
||||
mi := &file_messages_proto_msgTypes[16]
|
||||
mi := &file_messages_proto_msgTypes[17]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1458,7 +1550,7 @@ func (x *Location) String() string {
|
||||
func (*Location) ProtoMessage() {}
|
||||
|
||||
func (x *Location) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[16]
|
||||
mi := &file_messages_proto_msgTypes[17]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1471,7 +1563,7 @@ func (x *Location) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use Location.ProtoReflect.Descriptor instead.
|
||||
func (*Location) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{16}
|
||||
return file_messages_proto_rawDescGZIP(), []int{17}
|
||||
}
|
||||
|
||||
func (x *Location) GetTime() uint64 {
|
||||
@@ -1503,25 +1595,27 @@ func (x *Location) GetAltitude() int32 {
|
||||
}
|
||||
|
||||
type DbMessage struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Outbound bool `protobuf:"varint,1,opt,name=outbound,proto3" json:"outbound,omitempty"` // direction of the message
|
||||
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
|
||||
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` // text data
|
||||
Status *ConversationStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
|
||||
Contact *ContactCard `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"`
|
||||
Group *Group `protobuf:"bytes,6,opt,name=group,proto3" json:"group,omitempty"`
|
||||
FilePaths []string `protobuf:"bytes,7,rep,name=file_paths,json=filePaths,proto3" json:"file_paths,omitempty"`
|
||||
CurrentLocation *Location `protobuf:"bytes,8,opt,name=current_location,json=currentLocation,proto3" json:"current_location,omitempty"`
|
||||
Appdata []byte `protobuf:"bytes,9,opt,name=appdata,proto3" json:"appdata,omitempty"`
|
||||
Invitation *Invitation `protobuf:"bytes,10,opt,name=invitation,proto3" json:"invitation,omitempty"`
|
||||
From string `protobuf:"bytes,11,opt,name=from,proto3" json:"from,omitempty"` // source peer uid, used when storing group conversations with more than one peer
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Outbound bool `protobuf:"varint,1,opt,name=outbound,proto3" json:"outbound,omitempty"` // direction of the message
|
||||
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
|
||||
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` // text data
|
||||
Status *ConversationStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
|
||||
Contact *ContactCard `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"`
|
||||
Group *Group `protobuf:"bytes,6,opt,name=group,proto3" json:"group,omitempty"`
|
||||
FilePaths []string `protobuf:"bytes,7,rep,name=file_paths,json=filePaths,proto3" json:"file_paths,omitempty"`
|
||||
CurrentLocation *Location `protobuf:"bytes,8,opt,name=current_location,json=currentLocation,proto3" json:"current_location,omitempty"`
|
||||
Appdata []byte `protobuf:"bytes,9,opt,name=appdata,proto3" json:"appdata,omitempty"`
|
||||
Invitation *Invitation `protobuf:"bytes,10,opt,name=invitation,proto3" json:"invitation,omitempty"`
|
||||
From string `protobuf:"bytes,11,opt,name=from,proto3" json:"from,omitempty"` // source peer uid, used when storing group conversations with more than one peer
|
||||
ServerDeliveryUuid string `protobuf:"bytes,12,opt,name=server_delivery_uuid,json=serverDeliveryUuid,proto3" json:"server_delivery_uuid,omitempty"` // uuid returned by the server upon delivery
|
||||
ServerDeliveryTimestamp uint64 `protobuf:"varint,13,opt,name=server_delivery_timestamp,json=serverDeliveryTimestamp,proto3" json:"server_delivery_timestamp,omitempty"` // timestamp of the server delivery
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *DbMessage) Reset() {
|
||||
*x = DbMessage{}
|
||||
mi := &file_messages_proto_msgTypes[17]
|
||||
mi := &file_messages_proto_msgTypes[18]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1533,7 +1627,7 @@ func (x *DbMessage) String() string {
|
||||
func (*DbMessage) ProtoMessage() {}
|
||||
|
||||
func (x *DbMessage) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[17]
|
||||
mi := &file_messages_proto_msgTypes[18]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1546,7 +1640,7 @@ func (x *DbMessage) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use DbMessage.ProtoReflect.Descriptor instead.
|
||||
func (*DbMessage) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{17}
|
||||
return file_messages_proto_rawDescGZIP(), []int{18}
|
||||
}
|
||||
|
||||
func (x *DbMessage) GetOutbound() bool {
|
||||
@@ -1626,6 +1720,20 @@ func (x *DbMessage) GetFrom() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DbMessage) GetServerDeliveryUuid() string {
|
||||
if x != nil {
|
||||
return x.ServerDeliveryUuid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DbMessage) GetServerDeliveryTimestamp() uint64 {
|
||||
if x != nil {
|
||||
return x.ServerDeliveryTimestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type VideoData struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
|
||||
@@ -1639,7 +1747,7 @@ type VideoData struct {
|
||||
|
||||
func (x *VideoData) Reset() {
|
||||
*x = VideoData{}
|
||||
mi := &file_messages_proto_msgTypes[18]
|
||||
mi := &file_messages_proto_msgTypes[19]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1651,7 +1759,7 @@ func (x *VideoData) String() string {
|
||||
func (*VideoData) ProtoMessage() {}
|
||||
|
||||
func (x *VideoData) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[18]
|
||||
mi := &file_messages_proto_msgTypes[19]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1664,7 +1772,7 @@ func (x *VideoData) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use VideoData.ProtoReflect.Descriptor instead.
|
||||
func (*VideoData) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{18}
|
||||
return file_messages_proto_rawDescGZIP(), []int{19}
|
||||
}
|
||||
|
||||
func (x *VideoData) GetUrl() string {
|
||||
@@ -1713,7 +1821,7 @@ type VideoCredential struct {
|
||||
|
||||
func (x *VideoCredential) Reset() {
|
||||
*x = VideoCredential{}
|
||||
mi := &file_messages_proto_msgTypes[19]
|
||||
mi := &file_messages_proto_msgTypes[20]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1725,7 +1833,7 @@ func (x *VideoCredential) String() string {
|
||||
func (*VideoCredential) ProtoMessage() {}
|
||||
|
||||
func (x *VideoCredential) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_messages_proto_msgTypes[19]
|
||||
mi := &file_messages_proto_msgTypes[20]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1738,7 +1846,7 @@ func (x *VideoCredential) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use VideoCredential.ProtoReflect.Descriptor instead.
|
||||
func (*VideoCredential) Descriptor() ([]byte, []int) {
|
||||
return file_messages_proto_rawDescGZIP(), []int{19}
|
||||
return file_messages_proto_rawDescGZIP(), []int{20}
|
||||
}
|
||||
|
||||
func (x *VideoCredential) GetUsername() string {
|
||||
@@ -1833,15 +1941,17 @@ const file_messages_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"video_data\x18\n" +
|
||||
" \x01(\v2\x12.meowlib.VideoDataR\tvideoData\x127\n" +
|
||||
"\fcontact_card\x18\v \x03(\v2\x14.meowlib.ContactCardR\vcontactCard\"l\n" +
|
||||
"\fcontact_card\x18\v \x03(\v2\x14.meowlib.ContactCardR\vcontactCard\"m\n" +
|
||||
"\x10MatriochkaServer\x12\x10\n" +
|
||||
"\x03url\x18\x01 \x01(\tR\x03url\x12\x1c\n" +
|
||||
"\tpublicKey\x18\x02 \x01(\tR\tpublicKey\x12\x12\n" +
|
||||
"\x04uuid\x18\x03 \x01(\tR\x04uuid\x12\x14\n" +
|
||||
"\x05delay\x18\x04 \x01(\x05R\x05delay\"\x9c\x01\n" +
|
||||
"\x03url\x18\x01 \x01(\tR\x03url\x12\x1d\n" +
|
||||
"\n" +
|
||||
"Matriochka\x12\x1c\n" +
|
||||
"\tlookupKey\x18\x01 \x01(\tR\tlookupKey\x12-\n" +
|
||||
"public_key\x18\x02 \x01(\tR\tpublicKey\x12\x12\n" +
|
||||
"\x04uuid\x18\x03 \x01(\tR\x04uuid\x12\x14\n" +
|
||||
"\x05delay\x18\x04 \x01(\x05R\x05delay\"\x9d\x01\n" +
|
||||
"\n" +
|
||||
"Matriochka\x12\x1d\n" +
|
||||
"\n" +
|
||||
"lookup_key\x18\x01 \x01(\tR\tlookupKey\x12-\n" +
|
||||
"\x04prev\x18\x02 \x01(\v2\x19.meowlib.MatriochkaServerR\x04prev\x12-\n" +
|
||||
"\x04next\x18\x03 \x01(\v2\x19.meowlib.MatriochkaServerR\x04next\x12\x12\n" +
|
||||
"\x04data\x18\x04 \x01(\fR\x04data\"\xc3\x01\n" +
|
||||
@@ -1854,42 +1964,51 @@ const file_messages_proto_rawDesc = "" +
|
||||
"\x03url\x18\x04 \x01(\tR\x03url\x12\x14\n" +
|
||||
"\x05login\x18\x05 \x01(\tR\x05login\x12\x1a\n" +
|
||||
"\bpassword\x18\x06 \x01(\tR\bpassword\x12\x1c\n" +
|
||||
"\tsignature\x18\a \x01(\tR\tsignature\"\xd5\x02\n" +
|
||||
"\tsignature\x18\a \x01(\tR\tsignature\"\xbc\x03\n" +
|
||||
"\vContactCard\x12\x12\n" +
|
||||
"\x04name\x18\x01 \x01(\tR\x04name\x12,\n" +
|
||||
"\x12contact_public_key\x18\x02 \x01(\tR\x10contactPublicKey\x122\n" +
|
||||
"\x15encryption_public_key\x18\x03 \x01(\tR\x13encryptionPublicKey\x12*\n" +
|
||||
"\x11lookup_public_key\x18\x04 \x01(\tR\x0flookupPublicKey\x126\n" +
|
||||
"\fpull_servers\x18\x05 \x03(\v2\x13.meowlib.ServerCardR\vpullServers\x12\x18\n" +
|
||||
"\aversion\x18\x06 \x01(\rR\aversion\x12#\n" +
|
||||
"\rinvitation_id\x18\a \x01(\tR\finvitationId\x12-\n" +
|
||||
"\x12invitation_message\x18\b \x01(\tR\x11invitationMessage\"\xc9\x01\n" +
|
||||
"\x11lookup_public_key\x18\x04 \x01(\tR\x0flookupPublicKey\x12!\n" +
|
||||
"\fsymetric_key\x18\x05 \x01(\tR\vsymetricKey\x126\n" +
|
||||
"\fpull_servers\x18\x06 \x03(\v2\x13.meowlib.ServerCardR\vpullServers\x12\x18\n" +
|
||||
"\aversion\x18\a \x01(\rR\aversion\x12#\n" +
|
||||
"\rinvitation_id\x18\b \x01(\tR\finvitationId\x12-\n" +
|
||||
"\x12invitation_message\x18\t \x01(\tR\x11invitationMessage\x12\x1e\n" +
|
||||
"\vdr_root_key\x18\n" +
|
||||
" \x01(\tR\tdrRootKey\x12\"\n" +
|
||||
"\rdr_public_key\x18\v \x01(\tR\vdrPublicKey\"\xe7\x01\n" +
|
||||
"\x11PackedUserMessage\x12 \n" +
|
||||
"\vdestination\x18\x01 \x01(\tR\vdestination\x12\x18\n" +
|
||||
"\apayload\x18\x02 \x01(\fR\apayload\x12\x1c\n" +
|
||||
"\tsignature\x18\x03 \x01(\fR\tsignature\x12(\n" +
|
||||
"\x0fserverTimestamp\x18\x04 \x03(\x03R\x0fserverTimestamp\x120\n" +
|
||||
"\x14server_delivery_uuid\x18\x05 \x01(\tR\x12serverDeliveryUuid\"\xb6\x02\n" +
|
||||
"\tsignature\x18\x03 \x01(\fR\tsignature\x12)\n" +
|
||||
"\x10server_timestamp\x18\x04 \x03(\x03R\x0fserverTimestamp\x120\n" +
|
||||
"\x14server_delivery_uuid\x18\x05 \x01(\tR\x12serverDeliveryUuid\x12\x1b\n" +
|
||||
"\tdr_header\x18\x06 \x01(\fR\bdrHeader\"\xe7\x02\n" +
|
||||
"\x12ConversationStatus\x12\x12\n" +
|
||||
"\x04uuid\x18\x01 \x01(\tR\x04uuid\x12$\n" +
|
||||
"\x0eanswer_to_uuid\x18\x02 \x01(\tR\fanswerToUuid\x12$\n" +
|
||||
"\rlocalSequence\x18\x03 \x01(\x04R\rlocalSequence\x12\x12\n" +
|
||||
"\x04sent\x18\x04 \x01(\x04R\x04sent\x12\x1a\n" +
|
||||
"\breceived\x18\x05 \x01(\x04R\breceived\x12\x1c\n" +
|
||||
"\tprocessed\x18\x06 \x01(\x04R\tprocessed\x12>\n" +
|
||||
"\x10my_next_identity\x18\a \x01(\v2\x14.meowlib.ContactCardR\x0emyNextIdentity\x122\n" +
|
||||
"\x15peer_next_identityAck\x18\b \x01(\x05R\x13peerNextIdentityAck\"K\n" +
|
||||
"\x04uuid\x18\x01 \x01(\tR\x04uuid\x12/\n" +
|
||||
"\treactions\x18\x02 \x03(\v2\x11.meowlib.ReactionR\treactions\x12\"\n" +
|
||||
"\rreply_to_uuid\x18\x03 \x01(\tR\vreplyToUuid\x12%\n" +
|
||||
"\x0elocal_sequence\x18\x04 \x01(\x04R\rlocalSequence\x12\x12\n" +
|
||||
"\x04sent\x18\x05 \x01(\x04R\x04sent\x12\x1a\n" +
|
||||
"\breceived\x18\x06 \x01(\x04R\breceived\x12\x1c\n" +
|
||||
"\tprocessed\x18\a \x01(\x04R\tprocessed\x12>\n" +
|
||||
"\x10my_next_identity\x18\b \x01(\v2\x14.meowlib.ContactCardR\x0emyNextIdentity\x123\n" +
|
||||
"\x16peer_next_identity_ack\x18\t \x01(\x05R\x13peerNextIdentityAck\"I\n" +
|
||||
"\bReaction\x12\x1a\n" +
|
||||
"\breaction\x18\x01 \x01(\tR\breaction\x12!\n" +
|
||||
"\fcontact_uuid\x18\x02 \x01(\tR\vcontactUuid\"K\n" +
|
||||
"\x05Group\x12\x12\n" +
|
||||
"\x04name\x18\x01 \x01(\tR\x04name\x12.\n" +
|
||||
"\amembers\x18\x02 \x03(\v2\x14.meowlib.ContactCardR\amembers\"\x94\x04\n" +
|
||||
"\amembers\x18\x02 \x03(\v2\x14.meowlib.ContactCardR\amembers\"\x95\x04\n" +
|
||||
"\vUserMessage\x12 \n" +
|
||||
"\vdestination\x18\x01 \x01(\tR\vdestination\x12\x12\n" +
|
||||
"\x04from\x18\x02 \x01(\tR\x04from\x12\x12\n" +
|
||||
"\x04type\x18\x03 \x01(\tR\x04type\x12\x12\n" +
|
||||
"\x04data\x18\x04 \x01(\fR\x04data\x123\n" +
|
||||
"\x06status\x18\x05 \x01(\v2\x1b.meowlib.ConversationStatusR\x06status\x12.\n" +
|
||||
"\acontact\x18\x06 \x01(\v2\x14.meowlib.ContactCardR\acontact\x127\n" +
|
||||
"\fknownServers\x18\a \x01(\v2\x13.meowlib.ServerCardR\fknownServers\x12$\n" +
|
||||
"\acontact\x18\x06 \x01(\v2\x14.meowlib.ContactCardR\acontact\x128\n" +
|
||||
"\rknown_servers\x18\a \x01(\v2\x13.meowlib.ServerCardR\fknownServers\x12$\n" +
|
||||
"\x05group\x18\b \x01(\v2\x0e.meowlib.GroupR\x05group\x12#\n" +
|
||||
"\x05files\x18\t \x03(\v2\r.meowlib.FileR\x05files\x12<\n" +
|
||||
"\x10current_location\x18\n" +
|
||||
@@ -1909,7 +2028,7 @@ const file_messages_proto_rawDesc = "" +
|
||||
"\x04time\x18\x01 \x01(\x04R\x04time\x12\x1a\n" +
|
||||
"\blatitude\x18\x02 \x01(\x02R\blatitude\x12\x1c\n" +
|
||||
"\tlongitude\x18\x03 \x01(\x02R\tlongitude\x12\x1a\n" +
|
||||
"\baltitude\x18\x04 \x01(\x05R\baltitude\"\x9a\x03\n" +
|
||||
"\baltitude\x18\x04 \x01(\x05R\baltitude\"\x88\x04\n" +
|
||||
"\tDbMessage\x12\x1a\n" +
|
||||
"\boutbound\x18\x01 \x01(\bR\boutbound\x12\x12\n" +
|
||||
"\x04type\x18\x02 \x01(\tR\x04type\x12\x12\n" +
|
||||
@@ -1925,7 +2044,9 @@ const file_messages_proto_rawDesc = "" +
|
||||
"invitation\x18\n" +
|
||||
" \x01(\v2\x13.meowlib.InvitationR\n" +
|
||||
"invitation\x12\x12\n" +
|
||||
"\x04from\x18\v \x01(\tR\x04from\"\xaa\x01\n" +
|
||||
"\x04from\x18\v \x01(\tR\x04from\x120\n" +
|
||||
"\x14server_delivery_uuid\x18\f \x01(\tR\x12serverDeliveryUuid\x12:\n" +
|
||||
"\x19server_delivery_timestamp\x18\r \x01(\x04R\x17serverDeliveryTimestamp\"\xaa\x01\n" +
|
||||
"\tVideoData\x12\x10\n" +
|
||||
"\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n" +
|
||||
"\x04room\x18\x02 \x01(\tR\x04room\x12\x1a\n" +
|
||||
@@ -1951,7 +2072,7 @@ func file_messages_proto_rawDescGZIP() []byte {
|
||||
return file_messages_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
|
||||
var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
|
||||
var file_messages_proto_goTypes = []any{
|
||||
(*PackedServerMessage)(nil), // 0: meowlib.PackedServerMessage
|
||||
(*Invitation)(nil), // 1: meowlib.Invitation
|
||||
@@ -1966,13 +2087,14 @@ var file_messages_proto_goTypes = []any{
|
||||
(*ContactCard)(nil), // 10: meowlib.ContactCard
|
||||
(*PackedUserMessage)(nil), // 11: meowlib.PackedUserMessage
|
||||
(*ConversationStatus)(nil), // 12: meowlib.ConversationStatus
|
||||
(*Group)(nil), // 13: meowlib.Group
|
||||
(*UserMessage)(nil), // 14: meowlib.UserMessage
|
||||
(*File)(nil), // 15: meowlib.File
|
||||
(*Location)(nil), // 16: meowlib.Location
|
||||
(*DbMessage)(nil), // 17: meowlib.DbMessage
|
||||
(*VideoData)(nil), // 18: meowlib.VideoData
|
||||
(*VideoCredential)(nil), // 19: meowlib.VideoCredential
|
||||
(*Reaction)(nil), // 13: meowlib.Reaction
|
||||
(*Group)(nil), // 14: meowlib.Group
|
||||
(*UserMessage)(nil), // 15: meowlib.UserMessage
|
||||
(*File)(nil), // 16: meowlib.File
|
||||
(*Location)(nil), // 17: meowlib.Location
|
||||
(*DbMessage)(nil), // 18: meowlib.DbMessage
|
||||
(*VideoData)(nil), // 19: meowlib.VideoData
|
||||
(*VideoCredential)(nil), // 20: meowlib.VideoCredential
|
||||
}
|
||||
var file_messages_proto_depIdxs = []int32{
|
||||
10, // 0: meowlib.Meet.contact_card:type_name -> meowlib.ContactCard
|
||||
@@ -1982,38 +2104,39 @@ var file_messages_proto_depIdxs = []int32{
|
||||
8, // 4: meowlib.ToServerMessage.matriochka_message:type_name -> meowlib.Matriochka
|
||||
1, // 5: meowlib.ToServerMessage.invitation:type_name -> meowlib.Invitation
|
||||
11, // 6: meowlib.ToServerMessage.device_messages:type_name -> meowlib.PackedUserMessage
|
||||
18, // 7: meowlib.ToServerMessage.video_data:type_name -> meowlib.VideoData
|
||||
19, // 7: meowlib.ToServerMessage.video_data:type_name -> meowlib.VideoData
|
||||
4, // 8: meowlib.ToServerMessage.credentials:type_name -> meowlib.Credentials
|
||||
11, // 9: meowlib.FromServerMessage.chat:type_name -> meowlib.PackedUserMessage
|
||||
9, // 10: meowlib.FromServerMessage.known_servers:type_name -> meowlib.ServerCard
|
||||
1, // 11: meowlib.FromServerMessage.invitation:type_name -> meowlib.Invitation
|
||||
11, // 12: meowlib.FromServerMessage.device_messages:type_name -> meowlib.PackedUserMessage
|
||||
18, // 13: meowlib.FromServerMessage.video_data:type_name -> meowlib.VideoData
|
||||
19, // 13: meowlib.FromServerMessage.video_data:type_name -> meowlib.VideoData
|
||||
10, // 14: meowlib.FromServerMessage.contact_card:type_name -> meowlib.ContactCard
|
||||
7, // 15: meowlib.Matriochka.prev:type_name -> meowlib.MatriochkaServer
|
||||
7, // 16: meowlib.Matriochka.next:type_name -> meowlib.MatriochkaServer
|
||||
9, // 17: meowlib.ContactCard.pull_servers:type_name -> meowlib.ServerCard
|
||||
10, // 18: meowlib.ConversationStatus.my_next_identity:type_name -> meowlib.ContactCard
|
||||
10, // 19: meowlib.Group.members:type_name -> meowlib.ContactCard
|
||||
12, // 20: meowlib.UserMessage.status:type_name -> meowlib.ConversationStatus
|
||||
10, // 21: meowlib.UserMessage.contact:type_name -> meowlib.ContactCard
|
||||
9, // 22: meowlib.UserMessage.knownServers:type_name -> meowlib.ServerCard
|
||||
13, // 23: meowlib.UserMessage.group:type_name -> meowlib.Group
|
||||
15, // 24: meowlib.UserMessage.files:type_name -> meowlib.File
|
||||
16, // 25: meowlib.UserMessage.current_location:type_name -> meowlib.Location
|
||||
1, // 26: meowlib.UserMessage.invitation:type_name -> meowlib.Invitation
|
||||
18, // 27: meowlib.UserMessage.video_data:type_name -> meowlib.VideoData
|
||||
12, // 28: meowlib.DbMessage.status:type_name -> meowlib.ConversationStatus
|
||||
10, // 29: meowlib.DbMessage.contact:type_name -> meowlib.ContactCard
|
||||
13, // 30: meowlib.DbMessage.group:type_name -> meowlib.Group
|
||||
16, // 31: meowlib.DbMessage.current_location:type_name -> meowlib.Location
|
||||
1, // 32: meowlib.DbMessage.invitation:type_name -> meowlib.Invitation
|
||||
19, // 33: meowlib.VideoData.credentials:type_name -> meowlib.VideoCredential
|
||||
34, // [34:34] is the sub-list for method output_type
|
||||
34, // [34:34] is the sub-list for method input_type
|
||||
34, // [34:34] is the sub-list for extension type_name
|
||||
34, // [34:34] is the sub-list for extension extendee
|
||||
0, // [0:34] is the sub-list for field type_name
|
||||
13, // 18: meowlib.ConversationStatus.reactions:type_name -> meowlib.Reaction
|
||||
10, // 19: meowlib.ConversationStatus.my_next_identity:type_name -> meowlib.ContactCard
|
||||
10, // 20: meowlib.Group.members:type_name -> meowlib.ContactCard
|
||||
12, // 21: meowlib.UserMessage.status:type_name -> meowlib.ConversationStatus
|
||||
10, // 22: meowlib.UserMessage.contact:type_name -> meowlib.ContactCard
|
||||
9, // 23: meowlib.UserMessage.known_servers:type_name -> meowlib.ServerCard
|
||||
14, // 24: meowlib.UserMessage.group:type_name -> meowlib.Group
|
||||
16, // 25: meowlib.UserMessage.files:type_name -> meowlib.File
|
||||
17, // 26: meowlib.UserMessage.current_location:type_name -> meowlib.Location
|
||||
1, // 27: meowlib.UserMessage.invitation:type_name -> meowlib.Invitation
|
||||
19, // 28: meowlib.UserMessage.video_data:type_name -> meowlib.VideoData
|
||||
12, // 29: meowlib.DbMessage.status:type_name -> meowlib.ConversationStatus
|
||||
10, // 30: meowlib.DbMessage.contact:type_name -> meowlib.ContactCard
|
||||
14, // 31: meowlib.DbMessage.group:type_name -> meowlib.Group
|
||||
17, // 32: meowlib.DbMessage.current_location:type_name -> meowlib.Location
|
||||
1, // 33: meowlib.DbMessage.invitation:type_name -> meowlib.Invitation
|
||||
20, // 34: meowlib.VideoData.credentials:type_name -> meowlib.VideoCredential
|
||||
35, // [35:35] is the sub-list for method output_type
|
||||
35, // [35:35] is the sub-list for method input_type
|
||||
35, // [35:35] is the sub-list for extension type_name
|
||||
35, // [35:35] is the sub-list for extension extendee
|
||||
0, // [0:35] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_messages_proto_init() }
|
||||
@@ -2027,7 +2150,7 @@ func file_messages_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_messages_proto_rawDesc), len(file_messages_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 20,
|
||||
NumMessages: 21,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
// You should use the field numbers 1 through 15
|
||||
// for the most-frequently-set fields.
|
||||
// Lower field number values take less space in the wire format.
|
||||
// For example, field numbers in the range 1 through 15 take one byte to encode.
|
||||
// Field numbers in the range 16 through 2047 take two bytes.
|
||||
// You should use the field numbers 1 through 15
|
||||
// for the most-frequently-set fields.
|
||||
// Lower field number values take less space in the wire format.
|
||||
// For example, field numbers in the range 1 through 15 take one byte to encode.
|
||||
// Field numbers in the range 16 through 2047 take two bytes.
|
||||
/**
|
||||
* Meow messages
|
||||
*
|
||||
* This is the Meow protocol protobuf messages description.
|
||||
*
|
||||
*
|
||||
*/
|
||||
syntax = "proto3";
|
||||
package meowlib;
|
||||
@@ -15,172 +15,181 @@ option go_package = "forge.redroom.link/yves/meowlib";
|
||||
|
||||
// structure definnig a message as received by a server in protobuf format
|
||||
message PackedServerMessage {
|
||||
string from = 1; // The client public key for that server to get an answer
|
||||
bytes payload = 2; // The ToServerMessage encrypted with the server public key |or| symetrical encryption as agreed earlier
|
||||
bytes signature = 3; // The message signature with the client public key |eo| the reference to teh symetrical key used
|
||||
string from = 1; // The client public key for that server to get an answer
|
||||
bytes payload = 2; // The ToServerMessage encrypted with the server public key |or| symetrical encryption as agreed earlier
|
||||
bytes signature = 3; // The message signature with the client public key |eo| the reference to teh symetrical key used
|
||||
}
|
||||
|
||||
// structure to hold an invitation through a server
|
||||
message Invitation {
|
||||
bytes payload = 1; // invitation payload, encrypted after step 2
|
||||
int32 timeout = 2; // how long do I want the invitation to remain available on the server
|
||||
int32 shortcodeLen = 3; // len of the shortcode you wish for short url transmission
|
||||
string shortcode = 4; // shortcode that the friend shall request to get the invitation
|
||||
string password = 5; // password to set for accessing invitation (optional)
|
||||
string uuid = 6; // id that the friend gave you, that you should include to your reply to get recognized
|
||||
int64 expiry = 7; // the server allowed expiry date, it may be samller than the requested timeout according to server policy
|
||||
int32 step = 8; // progress in the inviattion process : 1=invite friend, 2=friend requests invitation, 3=friend's answer
|
||||
string from=9; // used in step 3 the answer public key to check the signature in user message
|
||||
bytes payload = 1; // invitation payload, encrypted after step 2
|
||||
int32 timeout = 2; // how long do I want the invitation to remain available on the server
|
||||
int32 shortcodeLen = 3; // len of the shortcode you wish for short url transmission
|
||||
string shortcode = 4; // shortcode that the friend shall request to get the invitation
|
||||
string password = 5; // password to set for accessing invitation (optional)
|
||||
string uuid = 6; // id that the friend gave you, that you should include to your reply to get recognized
|
||||
int64 expiry = 7; // the server allowed expiry date, it may be samller than the requested timeout according to server policy
|
||||
int32 step = 8; // progress in the inviattion process : 1=invite friend, 2=friend requests invitation, 3=friend's answer
|
||||
string from = 9; // used in step 3 the answer public key to check the signature in user message
|
||||
}
|
||||
|
||||
// structure for requesting incoming messages
|
||||
message ConversationRequest {
|
||||
string lookup_key = 1; // lookup key for a conversation
|
||||
bool delivery_request = 2; // look for for delivery tracking, key is implicit, "from" field is used
|
||||
int64 send_timestamp = 3;
|
||||
string lookup_signature = 4; // prove that I own the private key by signing that block
|
||||
}
|
||||
message ConversationRequest {
|
||||
string lookup_key = 1; // lookup key for a conversation
|
||||
bool delivery_request = 2; // look for for delivery tracking, key is implicit, "from" field is used
|
||||
int64 send_timestamp = 3;
|
||||
string lookup_signature = 4; // prove that I own the private key by signing that block
|
||||
}
|
||||
|
||||
message Meet {
|
||||
string public_status = 1; // Publish my online status, if the server is a meeting server
|
||||
ContactCard contact_card = 2; // mine or the requester
|
||||
string message = 3; // short description
|
||||
string public_status = 1; // Publish my online status, if the server is a meeting server
|
||||
ContactCard contact_card = 2; // mine or the requester
|
||||
string message = 3; // short description
|
||||
}
|
||||
|
||||
message Credentials {
|
||||
string login = 1; // login
|
||||
string password = 2; // password
|
||||
string public_key = 3; // public key
|
||||
string private_key = 4; // private key
|
||||
string login = 1; // login
|
||||
string password = 2; // password
|
||||
string public_key = 3; // public key
|
||||
string private_key = 4; // private key
|
||||
}
|
||||
|
||||
// structure defining a message for a server, that will be encrypted, then sent in a "packedmessage" payload
|
||||
message ToServerMessage {
|
||||
string type = 1; // Type 1 : final destination / 2 : forward
|
||||
string from = 2 ; // My pub key for the server to send me an encrypter answer
|
||||
bytes payload = 3 ; // optional payload for server
|
||||
string type = 1; // Type 1 : final destination / 2 : forward
|
||||
string from = 2 ; // My pub key for the server to send me an encrypter answer
|
||||
bytes payload = 3 ; // optional payload for server
|
||||
|
||||
repeated ConversationRequest pull_request = 4;
|
||||
repeated ConversationRequest pull_request = 4;
|
||||
|
||||
repeated PackedUserMessage messages = 5;
|
||||
repeated PackedUserMessage messages = 5;
|
||||
|
||||
repeated ServerCard known_servers = 6;
|
||||
repeated ServerCard known_servers = 6;
|
||||
|
||||
Matriochka matriochka_message = 7;
|
||||
|
||||
string uuid = 8;
|
||||
Matriochka matriochka_message = 7;
|
||||
|
||||
Invitation invitation = 9; // invitation for the 2 first steps of a "through server" invitation process
|
||||
string uuid = 8;
|
||||
|
||||
repeated PackedUserMessage device_messages = 10; // messages to another device belonging to the same user
|
||||
Invitation invitation = 9; // invitation for the 2 first steps of a "through server" invitation process
|
||||
|
||||
int64 timeout = 11; // timeout expected by the client for the server to answer (long polling)
|
||||
repeated PackedUserMessage device_messages = 10; // messages to another device belonging to the same user
|
||||
|
||||
VideoData video_data = 12; // video call data
|
||||
int64 timeout = 11; // timeout expected by the client for the server to answer (long polling)
|
||||
|
||||
Credentials credentials = 13; // credentials for a new user or mandatory server creds
|
||||
VideoData video_data = 12; // video call data
|
||||
|
||||
Credentials credentials = 13; // credentials for a new user or mandatory server creds
|
||||
}
|
||||
|
||||
|
||||
// structure defining a from server receiver message decrypted from a "packedmessage" payload
|
||||
message FromServerMessage {
|
||||
string type = 1; // Type
|
||||
string server_public_key = 2 ; // Pub key from the server
|
||||
bytes payload = 3 ; //
|
||||
string uuid_ack = 4 ; // Ack for the last received ToServerMessage Uuid
|
||||
string server_uuid = 5 ; // Provides the server uuid that replaced the client uuid
|
||||
string type = 1; // Type
|
||||
string server_public_key = 2 ; // Pub key from the server
|
||||
bytes payload = 3 ; //
|
||||
string uuid_ack = 4 ; // Ack for the last received ToServerMessage Uuid
|
||||
string server_uuid = 5 ; // Provides the server uuid that replaced the client uuid
|
||||
|
||||
repeated PackedUserMessage chat = 6;
|
||||
repeated PackedUserMessage chat = 6;
|
||||
|
||||
repeated ServerCard known_servers = 7;
|
||||
repeated ServerCard known_servers = 7;
|
||||
|
||||
Invitation invitation = 8; // invitation answer, for the third steps of any invitation
|
||||
Invitation invitation = 8; // invitation answer, for the third steps of any invitation
|
||||
|
||||
repeated PackedUserMessage device_messages = 9; // messages from other devices belonging to the same user
|
||||
repeated PackedUserMessage device_messages = 9; // messages from other devices belonging to the same user
|
||||
|
||||
VideoData video_data = 10; // video call data
|
||||
VideoData video_data = 10; // video call data
|
||||
|
||||
repeated ContactCard contact_card = 11; // contact list for a personae
|
||||
repeated ContactCard contact_card = 11; // contact list for a personae
|
||||
}
|
||||
|
||||
message MatriochkaServer {
|
||||
string url = 1; // Server Url
|
||||
string publicKey = 2; // Server Public Key
|
||||
string uuid = 3 ; // Optional, uuid for delivery confirmation
|
||||
int32 delay = 4; // Max delay requested for message forwarding or delivery tracking
|
||||
string url = 1; // Server Url
|
||||
string public_key = 2; // Server Public Key
|
||||
string uuid = 3 ; // Optional, uuid for delivery confirmation
|
||||
int32 delay = 4; // Max delay requested for message forwarding or delivery tracking
|
||||
}
|
||||
|
||||
message Matriochka {
|
||||
string lookupKey = 1; // Optional, only if you want delivery tracking, less stealth
|
||||
MatriochkaServer prev = 2; // Optional, like above
|
||||
MatriochkaServer next = 3; // Next server to deliver the message to
|
||||
bytes data = 4; // Matriochka data
|
||||
string lookup_key = 1; // Optional, only if you want delivery tracking, less stealth
|
||||
MatriochkaServer prev = 2; // Optional, like above
|
||||
MatriochkaServer next = 3; // Next server to deliver the message to
|
||||
bytes data = 4; // Matriochka data
|
||||
}
|
||||
|
||||
// structure describing required server attributes
|
||||
message ServerCard {
|
||||
string name = 1; // friendly server name
|
||||
string description=2; // description : owner type (company/private/university...),
|
||||
string public_key = 3; // public key you must use to send encrypted messages to that server
|
||||
string url = 4; // meow server url
|
||||
string login = 5; // required login to access the server
|
||||
string password = 6; // password associated to the login
|
||||
string signature = 7; // signature of all previous fields by the server itself
|
||||
}
|
||||
string name = 1; // friendly server name
|
||||
string description = 2; // description : owner type (company/private/university...),
|
||||
string public_key = 3; // public key you must use to send encrypted messages to that server
|
||||
string url = 4; // meow server url
|
||||
string login = 5; // required login to access the server
|
||||
string password = 6; // password associated to the login
|
||||
string signature = 7; // signature of all previous fields by the server itself
|
||||
}
|
||||
|
||||
// structure describing a user contact card ie the minimum set of attributes for exchanging identities
|
||||
message ContactCard {
|
||||
string name=1; // contact nickname
|
||||
string contact_public_key =2; // contact public key, will be used to authenticate her/his messages
|
||||
string encryption_public_key= 3; // public key you must use to to write encrypted messages to that contact
|
||||
string lookup_public_key =4; // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers
|
||||
repeated ServerCard pull_servers =5; // list the servers where the contact will look for messages from you
|
||||
uint32 version = 6;
|
||||
string invitation_id=7;
|
||||
string invitation_message=8;
|
||||
string name = 1; // contact nickname
|
||||
string contact_public_key = 2; // contact public key, will be used to authenticate her/his messages
|
||||
string encryption_public_key = 3; // public key you must use to to write encrypted messages to that contact
|
||||
string lookup_public_key = 4; // public key you will use as "destination identifier" for her/him to lookup for your messages on the servers
|
||||
string symetric_key = 5; // agreed key for payload symetric encryption
|
||||
repeated ServerCard pull_servers = 6; // list the servers where the contact will look for messages from you
|
||||
uint32 version = 7;
|
||||
string invitation_id = 8;
|
||||
string invitation_message = 9;
|
||||
string dr_root_key = 10; // DR pre-shared root key (base64, 32 bytes)
|
||||
string dr_public_key = 11; // DR DH public key of the initiator (base64)
|
||||
}
|
||||
|
||||
// structure for sending a message to be forwarded to another user in protobuf format
|
||||
message PackedUserMessage {
|
||||
string destination=1; // the peer's current conversation lookup public key
|
||||
bytes payload=2; // the message UserMessage encrypted with the destination peer's public key
|
||||
bytes signature=3; // the payload signature with the client identity private key
|
||||
repeated int64 serverTimestamp=4; // server time stamp, might be several in matriochka mode
|
||||
string server_delivery_uuid=5; // message uuid, for server delivery tracking, omitted if not delivery tracking desired
|
||||
string destination = 1; // the peer's current conversation lookup public key
|
||||
bytes payload = 2; // the message UserMessage encrypted with the destination peer's public key
|
||||
bytes signature = 3; // the payload signature with the client identity private key
|
||||
repeated int64 server_timestamp = 4; // server time stamp, might be several in matriochka mode
|
||||
string server_delivery_uuid = 5; // message uuid, for server delivery tracking, omitted if not delivery tracking desired
|
||||
bytes dr_header = 6; // serialized doubleratchet MessageHeader; empty = no DR layer
|
||||
}
|
||||
|
||||
message ConversationStatus {
|
||||
string uuid = 1;
|
||||
string answer_to_uuid=2; // message is an answer to another one, specify uuid here
|
||||
uint64 localSequence = 3 ; // seq number in local conversation for custom reordering
|
||||
uint64 sent = 4 ; // timestamp of the message sent
|
||||
uint64 received = 5; // timestamp of the message received
|
||||
uint64 processed = 6; // timestamp of the message processed
|
||||
ContactCard my_next_identity = 7;
|
||||
int32 peer_next_identityAck = 8; // version of the new peer accepted id
|
||||
}
|
||||
string uuid = 1; // uuid of message, or uuid of related message if uuid_action is not empty
|
||||
repeated Reaction reactions = 2; // reaction to the message per peer
|
||||
string reply_to_uuid = 3; // this message replies to the specified uuid
|
||||
uint64 local_sequence = 4 ; // seq number in local conversation for custom reordering
|
||||
uint64 sent = 5 ; // timestamp of the message sent
|
||||
uint64 received = 6; // timestamp of the message received
|
||||
uint64 processed = 7; // timestamp of the message processed
|
||||
ContactCard my_next_identity = 8;
|
||||
int32 peer_next_identity_ack = 9; // version of the new peer accepted id
|
||||
|
||||
message Group{
|
||||
string name=1;
|
||||
repeated ContactCard members = 2;
|
||||
}
|
||||
|
||||
message Reaction {
|
||||
string reaction = 1;
|
||||
string contact_uuid = 2;
|
||||
}
|
||||
|
||||
message Group{
|
||||
string name = 1;
|
||||
repeated ContactCard members = 2;
|
||||
}
|
||||
|
||||
// structure defining information that might be exchanged between two peers.
|
||||
message UserMessage {
|
||||
string destination = 1; // Lookupkey
|
||||
string from = 2; // My public key for that contact
|
||||
string type = 3; // Message type
|
||||
bytes data = 4;
|
||||
ConversationStatus status = 5;
|
||||
ContactCard contact = 6;
|
||||
ServerCard knownServers = 7;
|
||||
Group group = 8;
|
||||
repeated File files = 9;
|
||||
Location current_location = 10;
|
||||
bytes appdata = 11;
|
||||
Invitation invitation = 12;
|
||||
VideoData video_data = 13;
|
||||
string destination = 1; // Lookupkey
|
||||
string from = 2; // My public key for that contact
|
||||
string type = 3; // Message type
|
||||
bytes data = 4;
|
||||
ConversationStatus status = 5;
|
||||
ContactCard contact = 6;
|
||||
ServerCard known_servers = 7;
|
||||
Group group = 8;
|
||||
repeated File files = 9;
|
||||
Location current_location = 10;
|
||||
bytes appdata = 11;
|
||||
Invitation invitation = 12;
|
||||
VideoData video_data = 13;
|
||||
}
|
||||
|
||||
// UserMessage types :
|
||||
@@ -190,45 +199,46 @@ message UserMessage {
|
||||
// 4 : location request
|
||||
// 5 : location response
|
||||
|
||||
|
||||
message File {
|
||||
string filename=1; // the proposed filename
|
||||
uint64 size=2; // the file size
|
||||
uint32 chunk=3; // the chunk counter if file is sent by chunks
|
||||
bytes data=4; // the file/chunk content
|
||||
string filename = 1; // the proposed filename
|
||||
uint64 size = 2; // the file size
|
||||
uint32 chunk = 3; // the chunk counter if file is sent by chunks
|
||||
bytes data = 4; // the file/chunk content
|
||||
}
|
||||
|
||||
message Location {
|
||||
uint64 time=1;
|
||||
float latitude=2;
|
||||
float longitude=3;
|
||||
int32 altitude=4;
|
||||
uint64 time = 1;
|
||||
float latitude = 2;
|
||||
float longitude = 3;
|
||||
int32 altitude = 4;
|
||||
}
|
||||
|
||||
message DbMessage {
|
||||
bool outbound = 1; // direction of the message
|
||||
string type = 2;
|
||||
bytes data = 3; // text data
|
||||
ConversationStatus status = 4;
|
||||
ContactCard contact = 5;
|
||||
Group group = 6;
|
||||
repeated string file_paths = 7;
|
||||
Location current_location = 8;
|
||||
bytes appdata = 9;
|
||||
Invitation invitation = 10;
|
||||
string from = 11; // source peer uid, used when storing group conversations with more than one peer
|
||||
bool outbound = 1; // direction of the message
|
||||
string type = 2;
|
||||
bytes data = 3; // text data
|
||||
ConversationStatus status = 4;
|
||||
ContactCard contact = 5;
|
||||
Group group = 6;
|
||||
repeated string file_paths = 7;
|
||||
Location current_location = 8;
|
||||
bytes appdata = 9;
|
||||
Invitation invitation = 10;
|
||||
string from = 11; // source peer uid, used when storing group conversations with more than one peer
|
||||
string server_delivery_uuid = 12; // uuid returned by the server upon delivery
|
||||
uint64 server_delivery_timestamp = 13; // timestamp of the server delivery
|
||||
}
|
||||
|
||||
message VideoData {
|
||||
string url = 1;
|
||||
string room = 2;
|
||||
uint64 duration = 3;
|
||||
repeated VideoCredential credentials = 4;
|
||||
repeated string media_query = 5;
|
||||
string url = 1;
|
||||
string room = 2;
|
||||
uint64 duration = 3;
|
||||
repeated VideoCredential credentials = 4;
|
||||
repeated string media_query = 5;
|
||||
}
|
||||
|
||||
message VideoCredential {
|
||||
string username = 1;
|
||||
string shared_key = 2;
|
||||
string token = 3;
|
||||
string username = 1;
|
||||
string shared_key = 2;
|
||||
string token = 3;
|
||||
}
|
||||
Reference in New Issue
Block a user