From 4a382fc12a681f31aeec28bb4c4336e86352fd83 Mon Sep 17 00:00:00 2001 From: Cassandra Heart Date: Fri, 21 Nov 2025 04:34:24 -0600 Subject: [PATCH] v2.1.0.11, the later half --- node/consensus/app/app_consensus_engine.go | 15 +- .../app/consensus_liveness_provider.go | 66 ++- node/consensus/app/message_collector.go | 269 +++++++++ node/consensus/app/message_processors.go | 14 +- .../global/consensus_leader_provider.go | 9 - .../global/consensus_liveness_provider.go | 78 ++- node/consensus/global/event_distributor.go | 11 +- node/consensus/global/genesis.go | 13 +- .../global/global_consensus_engine.go | 208 ++++++- node/consensus/global/message_collector.go | 230 ++++++++ node/consensus/global/message_processors.go | 19 +- node/consensus/global/message_validation.go | 7 +- node/datarpc/data_worker_ipc_server.go | 5 +- .../engines/compute_execution_engine.go | 5 +- .../engines/global_execution_engine.go | 5 +- .../engines/hypergraph_execution_engine.go | 5 +- .../engines/token_execution_engine.go | 5 +- node/keyedaggregator/aggregator.go | 266 +++++++++ node/keyedaggregator/aggregator_test.go | 201 +++++++ node/keyedaggregator/collectors.go | 144 +++++ node/keyedaggregator/errors.go | 13 + node/keyedcollector/cache.go | 116 ++++ node/keyedcollector/collector.go | 111 ++++ node/keyedcollector/collector_test.go | 272 +++++++++ node/keyedcollector/errors.go | 74 +++ node/keyedcollector/factory.go | 58 ++ node/p2p/blossomsub.go | 222 ++++++- node/rpc/hypergraph_sync_rpc_server_test.go | 13 +- node/rpc/node_rpc_server.go | 1 + node/worker/manager.go | 272 +++++++-- protobufs/dispatch.go | 78 +++ protobufs/dispatch_test.go | 2 +- protobufs/global.go | 550 +++++++++++++++++- protobufs/global_test.go | 118 ++-- protobufs/keys.go | 244 +++++++- protobufs/keys_test.go | 31 - protobufs/node.pb.go | 534 ++++++++--------- protobufs/node.proto | 1 + types/worker/manager.go | 1 + 39 files changed, 3711 insertions(+), 575 deletions(-) create mode 100644 
node/consensus/app/message_collector.go create mode 100644 node/consensus/global/message_collector.go create mode 100644 node/keyedaggregator/aggregator.go create mode 100644 node/keyedaggregator/aggregator_test.go create mode 100644 node/keyedaggregator/collectors.go create mode 100644 node/keyedaggregator/errors.go create mode 100644 node/keyedcollector/cache.go create mode 100644 node/keyedcollector/collector.go create mode 100644 node/keyedcollector/collector_test.go create mode 100644 node/keyedcollector/errors.go create mode 100644 node/keyedcollector/factory.go diff --git a/node/consensus/app/app_consensus_engine.go b/node/consensus/app/app_consensus_engine.go index 5f02510..ee8d206 100644 --- a/node/consensus/app/app_consensus_engine.go +++ b/node/consensus/app/app_consensus_engine.go @@ -41,6 +41,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/node/dispatch" "source.quilibrium.com/quilibrium/monorepo/node/execution/manager" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" + keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" "source.quilibrium.com/quilibrium/monorepo/node/keys" "source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/node/p2p/onion" @@ -107,8 +108,10 @@ type AppConsensusEngine struct { peerInfoManager tp2p.PeerInfoManager currentDifficulty uint32 currentDifficultyMu sync.RWMutex - pendingMessages []*protobufs.Message - pendingMessagesMu sync.RWMutex + messageCollectors *keyedaggregator.SequencedCollectors[sequencedAppMessage] + messageAggregator *keyedaggregator.SequencedAggregator[sequencedAppMessage] + lastProposalRank uint64 + lastProposalRankMu sync.RWMutex collectedMessages []*protobufs.Message collectedMessagesMu sync.RWMutex provingMessages []*protobufs.Message @@ -256,7 +259,6 @@ func NewAppConsensusEngine( proposalCache: make(map[uint64]*protobufs.AppShardProposal), pendingCertifiedParents: 
make(map[uint64]*protobufs.AppShardProposal), proofCache: make(map[uint64][516]byte), - pendingMessages: []*protobufs.Message{}, collectedMessages: []*protobufs.Message{}, provingMessages: []*protobufs.Message{}, consensusMessageQueue: make(chan *pb.Message, 1000), @@ -457,11 +459,16 @@ func NewAppConsensusEngine( executorsRegistered.WithLabelValues(engine.appAddressHex).Set(0) pendingMessagesCount.WithLabelValues(engine.appAddressHex).Set(0) + if err := engine.initAppMessageAggregator(); err != nil { + return nil, errors.Wrap(err, "new app consensus engine") + } + componentBuilder := lifecycle.NewComponentManagerBuilder() // Add execution engines componentBuilder.AddWorker(engine.executionManager.Start) componentBuilder.AddWorker(engine.eventDistributor.Start) componentBuilder.AddWorker(engine.appTimeReel.Start) + componentBuilder.AddWorker(engine.startAppMessageAggregator) latest, err := engine.consensusStore.GetConsensusState(engine.appAddress) var state *models.CertifiedState[*protobufs.AppShardFrame] @@ -518,6 +525,8 @@ func NewAppConsensusEngine( } pending = engine.getPendingProposals(frame.Header.FrameNumber) } + + engine.recordProposalRank(state.Rank()) liveness, err := engine.consensusStore.GetLivenessState(appAddress) if err == nil { engine.currentRank = liveness.CurrentRank diff --git a/node/consensus/app/consensus_liveness_provider.go b/node/consensus/app/consensus_liveness_provider.go index 632fffe..de44b80 100644 --- a/node/consensus/app/consensus_liveness_provider.go +++ b/node/consensus/app/consensus_liveness_provider.go @@ -2,10 +2,10 @@ package app import ( "context" - "slices" "github.com/pkg/errors" "go.uber.org/zap" + keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -42,23 +42,62 @@ func (p *AppLivenessProvider) Collect( mixnetMessages = p.engine.mixnet.GetMessages() } - finalizedMessages := []*protobufs.Message{} - - // Get and clear pending messages - 
p.engine.pendingMessagesMu.Lock() - pendingMessages := p.engine.pendingMessages - p.engine.pendingMessages = []*protobufs.Message{} - p.engine.pendingMessagesMu.Unlock() + var collectorRecords []*sequencedAppMessage + var collector keyedaggregator.Collector[sequencedAppMessage] + if p.engine.messageCollectors != nil { + var err error + var found bool + collector, found, err = p.engine.getAppMessageCollector(rank) + if err != nil && !errors.Is(err, keyedaggregator.ErrSequenceBelowRetention) { + p.engine.logger.Warn( + "could not fetch collector for rank", + zap.Uint64("rank", rank), + zap.Error(err), + ) + } else if found { + collectorRecords = collector.Records() + } + } txMap := map[string][][]byte{} - for i, message := range slices.Concat(mixnetMessages, pendingMessages) { + finalizedMessages := make( + []*protobufs.Message, + 0, + len(collectorRecords)+len(mixnetMessages), + ) + + for _, record := range collectorRecords { + if record == nil || record.message == nil { + continue + } + lockedAddrs, err := p.engine.executionManager.Lock( + record.frameNumber, + record.message.Address, + record.message.Payload, + ) + if err != nil { + p.engine.logger.Debug( + "message failed lock", + zap.Uint64("rank", rank), + zap.Error(err), + ) + if collector != nil { + collector.Remove(record) + } + continue + } + + txMap[string(record.message.Hash)] = lockedAddrs + finalizedMessages = append(finalizedMessages, record.message) + } + + for i, message := range mixnetMessages { lockedAddrs, err := p.validateAndLockMessage(frameNumber, i, message) if err != nil { continue } txMap[string(message.Hash)] = lockedAddrs - finalizedMessages = append(finalizedMessages, message) } @@ -71,7 +110,7 @@ func (p *AppLivenessProvider) Collect( "collected messages", zap.Int( "total_message_count", - len(mixnetMessages)+len(pendingMessages), + len(mixnetMessages)+len(collectorRecords), ), zap.Int("valid_message_count", len(finalizedMessages)), zap.Uint64( @@ -89,6 +128,11 @@ func (p 
*AppLivenessProvider) Collect( return CollectedCommitments{}, errors.Wrap(err, "collect") } + if p.engine.messageAggregator != nil { + p.engine.messageAggregator.OnSequenceChange(rank, rank+1) + } + pendingMessagesCount.WithLabelValues(p.engine.appAddressHex).Set(0) + p.engine.collectedMessagesMu.Lock() p.engine.collectedMessages = finalizedMessages p.engine.collectedMessagesMu.Unlock() diff --git a/node/consensus/app/message_collector.go b/node/consensus/app/message_collector.go new file mode 100644 index 0000000..8873c6d --- /dev/null +++ b/node/consensus/app/message_collector.go @@ -0,0 +1,269 @@ +package app + +import ( + "fmt" + + "golang.org/x/crypto/sha3" + "google.golang.org/protobuf/proto" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/tracing" + keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" + keyedcollector "source.quilibrium.com/quilibrium/monorepo/node/keyedcollector" + "source.quilibrium.com/quilibrium/monorepo/protobufs" +) + +const maxAppMessagesPerRank = 100 + +type sequencedAppMessage struct { + rank uint64 + frameNumber uint64 + identity models.Identity + message *protobufs.Message +} + +func newSequencedAppMessage( + rank uint64, + message *protobufs.Message, +) *sequencedAppMessage { + if message == nil { + return nil + } + cloned := proto.Clone(message).(*protobufs.Message) + return &sequencedAppMessage{ + rank: rank, + identity: models.Identity(string(cloned.Hash)), + message: cloned, + } +} + +var appMessageTraits = keyedcollector.RecordTraits[sequencedAppMessage]{ + Sequence: func(m *sequencedAppMessage) uint64 { + if m == nil { + return 0 + } + return m.rank + }, + Identity: func(m *sequencedAppMessage) models.Identity { + if m == nil { + return "" + } + return m.identity + }, + Equals: func(a, b *sequencedAppMessage) bool { + if a == nil || b == nil { + return a == b + } + 
return string(a.identity) == string(b.identity) + }, +} + +type appMessageProcessorFactory struct { + engine *AppConsensusEngine +} + +func (f *appMessageProcessorFactory) Create( + sequence uint64, +) (keyedcollector.Processor[sequencedAppMessage], error) { + return &appMessageProcessor{ + engine: f.engine, + rank: sequence, + }, nil +} + +type appMessageProcessor struct { + engine *AppConsensusEngine + rank uint64 +} + +func (p *appMessageProcessor) Process( + record *sequencedAppMessage, +) error { + if record == nil || record.message == nil { + return keyedcollector.NewInvalidRecordError( + record, + fmt.Errorf("nil app message"), + ) + } + + if err := p.enforceCollectorLimit(record); err != nil { + return err + } + + frameNumber, err := p.frameNumberForRank() + if err != nil { + return keyedcollector.NewInvalidRecordError(record, err) + } + + if err := p.engine.executionManager.ValidateMessage( + frameNumber, + record.message.Address, + record.message.Payload, + ); err != nil { + return keyedcollector.NewInvalidRecordError(record, err) + } + + record.frameNumber = frameNumber + p.engine.updatePendingMessagesGauge(p.rank) + + return nil +} + +func (p *appMessageProcessor) frameNumberForRank() (uint64, error) { + rank := p.rank + if rank == 0 { + rank = 1 + } + qc, err := p.engine.clockStore.GetQuorumCertificate( + p.engine.appAddress, + rank-1, + ) + if err != nil { + qc, err = p.engine.clockStore.GetLatestQuorumCertificate( + p.engine.appAddress, + ) + if err != nil { + return 0, err + } + } + + return qc.GetFrameNumber() + 1, nil +} + +func (p *appMessageProcessor) enforceCollectorLimit( + record *sequencedAppMessage, +) error { + collector, found, err := p.engine.getAppMessageCollector(p.rank) + if err != nil || !found { + return nil + } + + if len(collector.Records()) >= maxAppMessagesPerRank { + collector.Remove(record) + return keyedcollector.NewInvalidRecordError( + record, + fmt.Errorf("message limit reached for rank %d", p.rank), + ) + } + + return nil 
+} + +func (e *AppConsensusEngine) initAppMessageAggregator() error { + tracer := tracing.NewZapTracer(e.logger.Named("app_message_collector")) + processorFactory := &appMessageProcessorFactory{engine: e} + collectorFactory, err := keyedcollector.NewFactory( + tracer, + appMessageTraits, + nil, + processorFactory, + ) + if err != nil { + return err + } + + e.messageCollectors = keyedaggregator.NewSequencedCollectors[sequencedAppMessage]( + tracer, + 0, + collectorFactory, + ) + + aggregator, err := keyedaggregator.NewSequencedAggregator[sequencedAppMessage]( + tracer, + 0, + e.messageCollectors, + func(m *sequencedAppMessage) uint64 { + if m == nil { + return 0 + } + return m.rank + }, + ) + if err != nil { + return err + } + + e.messageAggregator = aggregator + return nil +} + +func (e *AppConsensusEngine) startAppMessageAggregator( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + if e.messageAggregator == nil { + ready() + <-ctx.Done() + return + } + + go func() { + if err := e.messageAggregator.ComponentManager.Start(ctx); err != nil { + ctx.Throw(err) + } + }() + + <-e.messageAggregator.ComponentManager.Ready() + ready() + <-e.messageAggregator.ComponentManager.Done() +} + +func (e *AppConsensusEngine) addAppMessage(message *protobufs.Message) { + if e.messageAggregator == nil || message == nil { + return + } + if len(message.Hash) == 0 { + hash := sha3.Sum256(message.Payload) + message.Hash = hash[:] + } + rank := e.nextRank() + record := newSequencedAppMessage(rank, message) + if record == nil { + return + } + e.messageAggregator.Add(record) +} + +func (e *AppConsensusEngine) nextRank() uint64 { + e.lastProposalRankMu.RLock() + last := e.lastProposalRank + e.lastProposalRankMu.RUnlock() + if last > 0 { + return last + 1 + } + return e.currentRank + 1 +} + +func (e *AppConsensusEngine) getAppMessageCollector( + rank uint64, +) (keyedaggregator.Collector[sequencedAppMessage], bool, error) { + if e.messageCollectors == nil { + return nil, 
false, nil + } + return e.messageCollectors.GetCollector(rank) +} + +func (e *AppConsensusEngine) recordProposalRank(rank uint64) { + if rank == 0 { + return + } + e.lastProposalRankMu.Lock() + if rank > e.lastProposalRank { + e.lastProposalRank = rank + } + e.lastProposalRankMu.Unlock() +} + +func (e *AppConsensusEngine) updatePendingMessagesGauge(rank uint64) { + if e.messageCollectors == nil { + return + } + collector, found, err := e.getAppMessageCollector(rank) + if err != nil || !found { + return + } + pendingMessagesCount.WithLabelValues(e.appAddressHex).Set( + float64(len(collector.Records())), + ) +} diff --git a/node/consensus/app/message_processors.go b/node/consensus/app/message_processors.go index 1a67fad..45a8e12 100644 --- a/node/consensus/app/message_processors.go +++ b/node/consensus/app/message_processors.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "encoding/hex" + "slices" "github.com/iden3/go-iden3-crypto/poseidon" "github.com/libp2p/go-libp2p/core/peer" @@ -376,6 +377,10 @@ func (e *AppConsensusEngine) processProposal( e.trySealParentWithChild(proposal) e.registerPendingCertifiedParent(proposal) + if proposal.State != nil { + e.recordProposalRank(proposal.State.GetRank()) + } + return true } @@ -714,17 +719,12 @@ func (e *AppConsensusEngine) handleProverMessage(message *pb.Message) { ) switch typePrefix { case protobufs.MessageBundleType: - // MessageBundle messages need to be collected for execution - // Store them in pendingMessages to be processed during Collect hash := sha3.Sum256(message.Data) - e.pendingMessagesMu.Lock() - e.pendingMessages = append(e.pendingMessages, &protobufs.Message{ + e.addAppMessage(&protobufs.Message{ Address: e.appAddress[:32], Hash: hash[:], - Payload: message.Data, + Payload: slices.Clone(message.Data), }) - e.pendingMessagesMu.Unlock() - e.logger.Debug( "collected app request for execution", zap.Uint32("type", typePrefix), diff --git a/node/consensus/global/consensus_leader_provider.go 
b/node/consensus/global/consensus_leader_provider.go index 07d1e11..259547b 100644 --- a/node/consensus/global/consensus_leader_provider.go +++ b/node/consensus/global/consensus_leader_provider.go @@ -145,15 +145,6 @@ func (p *GlobalLeaderProvider) ProveNextState( ) } - _, err = p.engine.livenessProvider.Collect( - ctx, - prior.Header.FrameNumber+1, - rank, - ) - if err != nil { - return nil, models.NewNoVoteErrorf("could not collect: %+w", err) - } - timer := prometheus.NewTimer(frameProvingDuration) defer timer.ObserveDuration() diff --git a/node/consensus/global/consensus_liveness_provider.go b/node/consensus/global/consensus_liveness_provider.go index faee23b..5ecd698 100644 --- a/node/consensus/global/consensus_liveness_provider.go +++ b/node/consensus/global/consensus_liveness_provider.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "golang.org/x/crypto/sha3" + keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/tries" ) @@ -42,37 +43,55 @@ func (p *GlobalLivenessProvider) Collect( mixnetMessages = p.engine.mixnet.GetMessages() } - // Get and clear pending prover messages - p.engine.pendingMessagesMu.Lock() - pendingMessages := p.engine.pendingMessages - p.engine.pendingMessages = [][]byte{} - p.engine.pendingMessagesMu.Unlock() - - // Convert pending messages to protobuf.Message format - globalAddress := make([]byte, 32) - for i := range globalAddress { - globalAddress[i] = 0xff + var collector keyedaggregator.Collector[sequencedGlobalMessage] + var collectorRecords []*sequencedGlobalMessage + if p.engine.messageCollectors != nil { + var err error + var found bool + collector, found, err = p.engine.getMessageCollector(rank) + if err != nil && !errors.Is(err, keyedaggregator.ErrSequenceBelowRetention) { + p.engine.logger.Warn( + "could not fetch collector for rank", + 
zap.Uint64("rank", rank), + zap.Error(err), + ) + } else if found { + collectorRecords = collector.Records() + } } - messages := make( + acceptedMessages := make( []*protobufs.Message, 0, - len(mixnetMessages)+len(pendingMessages), + len(collectorRecords)+len(mixnetMessages), ) - messages = append(messages, mixnetMessages...) - for _, msgData := range pendingMessages { - messages = append(messages, &protobufs.Message{ - Address: globalAddress, - Payload: msgData, - }) + if collector != nil { + for _, record := range collectorRecords { + if record == nil || record.message == nil { + continue + } + if err := p.lockCollectorMessage( + frameNumber, + record.message, + ); err != nil { + p.engine.logger.Debug( + "message failed lock", + zap.Uint64("frame_number", frameNumber), + zap.Error(err), + ) + collector.Remove(record) + continue + } + acceptedMessages = append(acceptedMessages, record.message) + } } - acceptedMessages := []*protobufs.Message{} + messages := append([]*protobufs.Message{}, mixnetMessages...) 
p.engine.logger.Debug( "collected messages, validating", - zap.Int("message_count", len(messages)), + zap.Int("message_count", len(messages)+len(collectorRecords)), ) for i, message := range messages { @@ -84,6 +103,10 @@ func (p *GlobalLivenessProvider) Collect( acceptedMessages = append(acceptedMessages, message) } + if p.engine.messageAggregator != nil { + p.engine.messageAggregator.OnSequenceChange(rank, rank+1) + } + err := p.engine.executionManager.Unlock() if err != nil { p.engine.logger.Error( @@ -232,3 +255,18 @@ func (p *GlobalLivenessProvider) validateAndLockMessage( return nil } + +func (p *GlobalLivenessProvider) lockCollectorMessage( + frameNumber uint64, + message *protobufs.Message, +) error { + if message == nil { + return errors.New("nil message") + } + _, err := p.engine.executionManager.Lock( + frameNumber, + message.Address, + message.Payload, + ) + return err +} diff --git a/node/consensus/global/event_distributor.go b/node/consensus/global/event_distributor.go index dee9dfb..b1718de 100644 --- a/node/consensus/global/event_distributor.go +++ b/node/consensus/global/event_distributor.go @@ -7,6 +7,7 @@ import ( "fmt" "math/big" "slices" + "strings" "time" pcrypto "github.com/libp2p/go-libp2p/core/crypto" @@ -515,6 +516,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals( shardsPaused := 0 logicalShards := 0 shardDivisions := 0 + awaitingFrame := map[uint64]struct{}{} for _, info := range shards { resp, err := e.getAppShardsFromProver( client, @@ -547,6 +549,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals( allocated = allocation.Status != 4 if allocation.Status == typesconsensus.ProverStatusJoining { shardsPending++ + awaitingFrame[allocation.JoinFrameNumber+360] = struct{}{} } if allocation.Status == typesconsensus.ProverStatusActive { shardsActive++ @@ -607,9 +610,15 @@ func (e *GlobalConsensusEngine) evaluateForProposals( } } + awaitingFrames := []string{} + for frame := range awaitingFrame { + awaitingFrames = 
append(awaitingFrames, fmt.Sprintf("%d", frame)) + } + e.logger.Info( "status for allocations", zap.Int("pending_joins", shardsPending), + zap.String("pending_join_frames", strings.Join(awaitingFrames, ", ")), zap.Int("pending_leaves", shardsLeaving), zap.Int("active", shardsActive), zap.Int("paused", shardsPaused), @@ -621,7 +630,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals( proposals, err := e.proposer.PlanAndAllocate( uint64(data.Frame.Header.Difficulty), proposalDescriptors, - 0, + 100, worldBytes, ) if err != nil { diff --git a/node/consensus/global/genesis.go b/node/consensus/global/genesis.go index f7080d2..2459129 100644 --- a/node/consensus/global/genesis.go +++ b/node/consensus/global/genesis.go @@ -386,7 +386,14 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame { ) for _, prover := range proverPubKeys { - addrbi, err := poseidon.HashBytes(prover) + proverAddrBI, err := poseidon.HashBytes(prover) + if err != nil { + panic(err) + } + addrbi, err := poseidon.HashBytes(slices.Concat( + token.QUIL_TOKEN_ADDRESS, + proverAddrBI.FillBytes(make([]byte, 32)), + )) if err != nil { panic(err) } @@ -396,7 +403,7 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame { err = rdfMultiprover.Set( globalintrinsics.GLOBAL_RDF_SCHEMA, - token.QUIL_TOKEN_ADDRESS, + intrinsics.GLOBAL_INTRINSIC_ADDRESS[:], "reward:ProverReward", "DelegateAddress", addrbi.FillBytes(make([]byte, 32)), @@ -412,7 +419,7 @@ func (e *GlobalConsensusEngine) createStubGenesis() *protobufs.GlobalFrame { balance = balanceBI.FillBytes(balance) err = rdfMultiprover.Set( globalintrinsics.GLOBAL_RDF_SCHEMA, - token.QUIL_TOKEN_ADDRESS, + intrinsics.GLOBAL_INTRINSIC_ADDRESS[:], "reward:ProverReward", "Balance", balance, diff --git a/node/consensus/global/global_consensus_engine.go b/node/consensus/global/global_consensus_engine.go index 90b74d6..fe9625d 100644 --- a/node/consensus/global/global_consensus_engine.go +++ 
b/node/consensus/global/global_consensus_engine.go @@ -22,6 +22,7 @@ import ( "github.com/mr-tron/base58" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" + "github.com/shopspring/decimal" "go.uber.org/zap" "golang.org/x/crypto/sha3" "golang.org/x/sync/errgroup" @@ -46,9 +47,11 @@ import ( "source.quilibrium.com/quilibrium/monorepo/node/dispatch" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global/compat" + tokenintrinsics "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" "source.quilibrium.com/quilibrium/monorepo/node/execution/manager" hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph" qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc" + keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" "source.quilibrium.com/quilibrium/monorepo/node/keys" "source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/node/p2p/onion" @@ -154,8 +157,8 @@ type GlobalConsensusEngine struct { minimumProvers func() uint64 blacklistMap map[string]bool blacklistMu sync.RWMutex - pendingMessages [][]byte - pendingMessagesMu sync.RWMutex + messageCollectors *keyedaggregator.SequencedCollectors[sequencedGlobalMessage] + messageAggregator *keyedaggregator.SequencedAggregator[sequencedGlobalMessage] currentDifficulty uint32 currentDifficultyMu sync.RWMutex lastProvenFrameTime time.Time @@ -296,7 +299,6 @@ func NewGlobalConsensusEngine( currentDifficulty: config.Engine.Difficulty, lastProvenFrameTime: time.Now(), blacklistMap: make(map[string]bool), - pendingMessages: [][]byte{}, peerInfoDigestCache: make(map[string]struct{}), keyRegistryDigestCache: make(map[string]struct{}), peerAuthCache: make(map[string]time.Time), @@ -305,6 +307,10 @@ func NewGlobalConsensusEngine( appShardCache: make(map[string]*appShardCacheEntry), } + if err := 
engine.initGlobalMessageAggregator(); err != nil { + return nil, err + } + if config.Engine.AlertKey != "" { alertPublicKey, err := hex.DecodeString(config.Engine.AlertKey) if err != nil { @@ -521,6 +527,7 @@ func NewGlobalConsensusEngine( // Add execution engines componentBuilder.AddWorker(engine.executionManager.Start) componentBuilder.AddWorker(engine.globalTimeReel.Start) + componentBuilder.AddWorker(engine.startGlobalMessageAggregator) if engine.config.P2P.Network == 99 || engine.config.Engine.ArchiveMode { latest, err := engine.consensusStore.GetConsensusState(nil) @@ -846,6 +853,16 @@ func NewGlobalConsensusEngine( engine.updateMetrics(ctx) }) + if !engine.config.Engine.ArchiveMode { + componentBuilder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + engine.monitorNodeHealth(ctx) + }) + } + // Start periodic tx lock pruning componentBuilder.AddWorker(func( ctx lifecycle.SignalerContext, @@ -2037,6 +2054,177 @@ func (e *GlobalConsensusEngine) pruneTxLocks() { } } +func (e *GlobalConsensusEngine) monitorNodeHealth( + ctx lifecycle.SignalerContext, +) { + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + e.runNodeHealthCheck() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + e.runNodeHealthCheck() + } + } +} + +func (e *GlobalConsensusEngine) runNodeHealthCheck() { + if e.workerManager == nil { + return + } + + workers, err := e.workerManager.RangeWorkers() + if err != nil { + e.logger.Warn("node health check failed to load workers", zap.Error(err)) + return + } + + allocated := 0 + for _, worker := range workers { + if worker.Allocated { + allocated++ + } + } + + baseFields := []zap.Field{ + zap.Int("total_workers", len(workers)), + zap.Int("allocated_workers", allocated), + } + + unreachable, err := e.workerManager.CheckWorkersConnected() + if err != nil { + e.logger.Warn( + "node health check could not verify worker connectivity", + append(baseFields, zap.Error(err))..., + ) 
+ return + } + + if len(unreachable) != 0 { + unreachable64 := make([]uint64, len(unreachable)) + for i, id := range unreachable { + unreachable64[i] = uint64(id) + } + e.logger.Warn( + "workers unreachable", + append( + baseFields, + zap.Uint64s("unreachable_workers", unreachable64), + )..., + ) + return + } + + headFrame, err := e.globalTimeReel.GetHead() + if err != nil { + e.logger.Warn( + "global head not yet available", + append(baseFields, zap.Error(err))..., + ) + return + } + if headFrame == nil || headFrame.Header == nil { + e.logger.Warn("global head not yet available", baseFields...) + return + } + + headTime := time.UnixMilli(headFrame.Header.Timestamp) + if time.Since(headTime) > time.Minute { + e.logger.Warn( + "latest frame is older than 60 seconds; node may still be synchronizing", + append( + baseFields, + zap.Uint64("head_frame_number", headFrame.Header.FrameNumber), + zap.Time("head_frame_time", headTime), + )..., + ) + return + } + + units, readable, err := e.getUnmintedRewardBalance() + if err != nil { + e.logger.Warn( + "unable to read prover reward balance", + append(baseFields, zap.Error(err))..., + ) + return + } + + e.logger.Info( + "node health check passed", + append( + baseFields, + zap.Uint64("head_frame_number", headFrame.Header.FrameNumber), + zap.Time("head_frame_time", headTime), + zap.String("unminted_reward_quil", readable), + zap.String("unminted_reward_raw_units", units.String()), + )..., + ) +} + +const rewardUnitsPerInterval int64 = 8_000_000_000 + +func (e *GlobalConsensusEngine) getUnmintedRewardBalance() ( + *big.Int, + string, + error, +) { + rewardAddress, err := e.deriveRewardAddress() + if err != nil { + return nil, "", errors.Wrap(err, "derive reward address") + } + + var vertexID [64]byte + copy(vertexID[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:]) + copy(vertexID[32:], rewardAddress) + + tree, err := e.hypergraph.GetVertexData(vertexID) + if err != nil { + return big.NewInt(0), "0", nil + } + if tree == nil { + 
return big.NewInt(0), "0", nil + } + + rdf := schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, e.inclusionProver) + balanceBytes, err := rdf.Get( + global.GLOBAL_RDF_SCHEMA, + "reward:ProverReward", + "Balance", + tree, + ) + if err != nil { + return nil, "", errors.Wrap(err, "read reward balance") + } + + units := new(big.Int).SetBytes(balanceBytes) + rewardReadable := decimal.NewFromBigInt(units, 0).Div( + decimal.NewFromInt(rewardUnitsPerInterval), + ).String() + + return units, rewardReadable, nil +} + +func (e *GlobalConsensusEngine) deriveRewardAddress() ([]byte, error) { + proverAddr := e.getProverAddress() + if len(proverAddr) == 0 { + return nil, errors.New("missing prover address") + } + + hash, err := poseidon.HashBytes( + slices.Concat(tokenintrinsics.QUIL_TOKEN_ADDRESS[:], proverAddr), + ) + if err != nil { + return nil, err + } + + return hash.FillBytes(make([]byte, 32)), nil +} + // validatePeerInfoSignature validates the signature of a peer info message func (e *GlobalConsensusEngine) validatePeerInfoSignature( peerInfo *protobufs.PeerInfo, @@ -3000,6 +3188,20 @@ func (e *GlobalConsensusEngine) OnQuorumCertificateTriggeredRankChange( // OnRankChange implements consensus.Consumer. func (e *GlobalConsensusEngine) OnRankChange(oldRank uint64, newRank uint64) { e.currentRank = newRank + + prior, err := e.clockStore.GetLatestGlobalClockFrame() + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return + } + _, err = e.livenessProvider.Collect( + context.TODO(), + prior.Header.FrameNumber+1, + newRank, + ) + if err != nil { + return + } } // OnReceiveProposal implements consensus.Consumer. 
diff --git a/node/consensus/global/message_collector.go b/node/consensus/global/message_collector.go new file mode 100644 index 0000000..a269169 --- /dev/null +++ b/node/consensus/global/message_collector.go @@ -0,0 +1,230 @@ +package global + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "slices" + + "golang.org/x/crypto/sha3" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/tracing" + keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" + keyedcollector "source.quilibrium.com/quilibrium/monorepo/node/keyedcollector" + "source.quilibrium.com/quilibrium/monorepo/protobufs" +) + +const maxGlobalMessagesPerFrame = 100 + +var globalMessageAddress = bytes.Repeat([]byte{0xff}, 32) + +type sequencedGlobalMessage struct { + sequence uint64 + identity models.Identity + payload []byte + message *protobufs.Message +} + +func newSequencedGlobalMessage( + sequence uint64, + payload []byte, +) *sequencedGlobalMessage { + copyPayload := slices.Clone(payload) + hash := sha3.Sum256(copyPayload) + return &sequencedGlobalMessage{ + sequence: sequence, + identity: models.Identity(string(hash[:])), + payload: copyPayload, + } +} + +var globalMessageTraits = keyedcollector.RecordTraits[sequencedGlobalMessage]{ + Sequence: func(m *sequencedGlobalMessage) uint64 { + if m == nil { + return 0 + } + return m.sequence + }, + Identity: func(m *sequencedGlobalMessage) models.Identity { + if m == nil { + return "" + } + return m.identity + }, + Equals: func(a, b *sequencedGlobalMessage) bool { + if a == nil || b == nil { + return a == b + } + return slices.Equal(a.payload, b.payload) + }, +} + +type globalMessageProcessorFactory struct { + engine *GlobalConsensusEngine +} + +func (f *globalMessageProcessorFactory) Create( + sequence uint64, +) (keyedcollector.Processor[sequencedGlobalMessage], error) { + return 
&globalMessageProcessor{ + engine: f.engine, + sequence: sequence, + }, nil +} + +type globalMessageProcessor struct { + engine *GlobalConsensusEngine + sequence uint64 +} + +func (p *globalMessageProcessor) Process( + record *sequencedGlobalMessage, +) error { + if record == nil { + return keyedcollector.NewInvalidRecordError( + record, + errors.New("nil global message"), + ) + } + + if len(record.payload) < 4 { + return keyedcollector.NewInvalidRecordError( + record, + errors.New("global message payload too short"), + ) + } + + typePrefix := binary.BigEndian.Uint32(record.payload[:4]) + if typePrefix != protobufs.MessageBundleType { + return keyedcollector.NewInvalidRecordError( + record, + fmt.Errorf("unexpected message type: %d", typePrefix), + ) + } + + message := &protobufs.Message{ + Address: globalMessageAddress, + Payload: record.payload, + } + + if err := p.enforceCollectorLimit(record); err != nil { + return err + } + + qc, err := p.engine.clockStore.GetQuorumCertificate(nil, record.sequence-1) + if err != nil { + qc, err = p.engine.clockStore.GetLatestQuorumCertificate(nil) + } + if err != nil { + return keyedcollector.NewInvalidRecordError(record, err) + } + + if err := p.engine.executionManager.ValidateMessage( + qc.FrameNumber+1, + message.Address, + message.Payload, + ); err != nil { + return keyedcollector.NewInvalidRecordError(record, err) + } + + record.message = message + return nil +} + +func (p *globalMessageProcessor) enforceCollectorLimit( + record *sequencedGlobalMessage, +) error { + collector, found, err := p.engine.getMessageCollector(p.sequence) + if err != nil || !found { + return nil + } + + if len(collector.Records()) >= maxGlobalMessagesPerFrame { + collector.Remove(record) + return keyedcollector.NewInvalidRecordError( + record, + fmt.Errorf("message limit reached for frame %d", p.sequence), + ) + } + + return nil +} + +func (e *GlobalConsensusEngine) initGlobalMessageAggregator() error { + tracer := 
tracing.NewZapTracer(e.logger.Named("global_message_collector")) + processorFactory := &globalMessageProcessorFactory{engine: e} + collectorFactory, err := keyedcollector.NewFactory( + tracer, + globalMessageTraits, + nil, + processorFactory, + ) + if err != nil { + return fmt.Errorf("global message collector factory: %w", err) + } + + e.messageCollectors = keyedaggregator.NewSequencedCollectors[sequencedGlobalMessage]( + tracer, + 0, + collectorFactory, + ) + + aggregator, err := keyedaggregator.NewSequencedAggregator[sequencedGlobalMessage]( + tracer, + 0, + e.messageCollectors, + func(m *sequencedGlobalMessage) uint64 { + if m == nil { + return 0 + } + return m.sequence + }, + ) + if err != nil { + return fmt.Errorf("global message aggregator: %w", err) + } + + e.messageAggregator = aggregator + return nil +} + +func (e *GlobalConsensusEngine) startGlobalMessageAggregator( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, +) { + if e.messageAggregator == nil { + ready() + <-ctx.Done() + return + } + + go func() { + if err := e.messageAggregator.ComponentManager.Start(ctx); err != nil { + ctx.Throw(err) + } + }() + + <-e.messageAggregator.ComponentManager.Ready() + ready() + <-e.messageAggregator.ComponentManager.Done() +} + +func (e *GlobalConsensusEngine) addGlobalMessage(data []byte) { + if e.messageAggregator == nil { + return + } + record := newSequencedGlobalMessage(e.currentRank+1, data) + e.messageAggregator.Add(record) +} + +func (e *GlobalConsensusEngine) getMessageCollector( + rank uint64, +) (keyedaggregator.Collector[sequencedGlobalMessage], bool, error) { + if e.messageCollectors == nil { + return nil, false, nil + } + return e.messageCollectors.GetCollector(rank) +} diff --git a/node/consensus/global/message_processors.go b/node/consensus/global/message_processors.go index 1d477ae..4304464 100644 --- a/node/consensus/global/message_processors.go +++ b/node/consensus/global/message_processors.go @@ -219,11 +219,7 @@ func (e 
*GlobalConsensusEngine) handleProverMessage(message *pb.Message) { switch typePrefix { case protobufs.MessageBundleType: - // MessageBundle messages need to be collected for execution - // Store them in pendingMessages to be processed during Collect - e.pendingMessagesMu.Lock() - e.pendingMessages = append(e.pendingMessages, message.Data) - e.pendingMessagesMu.Unlock() + e.addGlobalMessage(message.Data) e.logger.Debug( "collected global request for execution", @@ -338,7 +334,6 @@ func (e *GlobalConsensusEngine) handleAppFrameMessage(message *pb.Message) { shardFramesProcessedTotal.WithLabelValues("error").Inc() } - e.pendingMessagesMu.Lock() bundle := &protobufs.MessageBundle{ Requests: []*protobufs.MessageRequest{ &protobufs.MessageRequest{ @@ -353,12 +348,10 @@ func (e *GlobalConsensusEngine) handleAppFrameMessage(message *pb.Message) { bundleBytes, err := bundle.ToCanonicalBytes() if err != nil { e.logger.Error("failed to add shard bundle", zap.Error(err)) - e.pendingMessagesMu.Unlock() return } - e.pendingMessages = append(e.pendingMessages, bundleBytes) - e.pendingMessagesMu.Unlock() + e.addGlobalMessage(bundleBytes) e.frameStoreMu.Lock() defer e.frameStoreMu.Unlock() e.appFrameStore[string(frame.Header.Address)] = frame @@ -1528,6 +1521,7 @@ func (e *GlobalConsensusEngine) handleVote(message *pb.Message) { } e.voteAggregator.AddVote(&vote) + voteProcessedTotal.WithLabelValues("success").Inc() } @@ -1596,12 +1590,7 @@ func (e *GlobalConsensusEngine) handleTimeoutState(message *pb.Message) { } func (e *GlobalConsensusEngine) handleMessageBundle(message *pb.Message) { - // MessageBundle messages need to be collected for execution - // Store them in pendingMessages to be processed during Collect - e.pendingMessagesMu.Lock() - e.pendingMessages = append(e.pendingMessages, message.Data) - e.pendingMessagesMu.Unlock() - + e.addGlobalMessage(message.Data) e.logger.Debug("collected global request for execution") } diff --git 
a/node/consensus/global/message_validation.go b/node/consensus/global/message_validation.go index d8ce9b4..2cd9fd5 100644 --- a/node/consensus/global/message_validation.go +++ b/node/consensus/global/message_validation.go @@ -387,7 +387,7 @@ func (e *GlobalConsensusEngine) validateAppFrameMessage( return tp2p.ValidationResultReject } - if frametime.AppFrameSince(frame) > 20*time.Second { + if frametime.AppFrameSince(frame) > 120*time.Second { shardFrameValidationTotal.WithLabelValues("ignore").Inc() return tp2p.ValidationResultIgnore } @@ -457,6 +457,11 @@ func (e *GlobalConsensusEngine) validateFrameMessage( return tp2p.ValidationResultIgnore } + if frametime.GlobalFrameSince(frame) > 120*time.Second { + frameValidationTotal.WithLabelValues("ignore").Inc() + return tp2p.ValidationResultIgnore + } + frameValidationTotal.WithLabelValues("accept").Inc() default: e.logger.Debug("received unknown type", zap.Uint32("type", typePrefix)) diff --git a/node/datarpc/data_worker_ipc_server.go b/node/datarpc/data_worker_ipc_server.go index a5c1afa..d0d3d10 100644 --- a/node/datarpc/data_worker_ipc_server.go +++ b/node/datarpc/data_worker_ipc_server.go @@ -125,10 +125,7 @@ func (r *DataWorkerIPCServer) Respawn( ctx context.Context, req *protobufs.RespawnRequest, ) (*protobufs.RespawnResponse, error) { - err := r.RespawnServer(req.Filter) - if err != nil { - return nil, err - } + go r.RespawnServer(req.Filter) return &protobufs.RespawnResponse{}, nil } diff --git a/node/execution/engines/compute_execution_engine.go b/node/execution/engines/compute_execution_engine.go index a273915..543cc4e 100644 --- a/node/execution/engines/compute_execution_engine.go +++ b/node/execution/engines/compute_execution_engine.go @@ -735,6 +735,7 @@ func (e *ComputeExecutionEngine) handleBundle( if err := fees.SanityCheck(feeQueue, consumers); err != nil { return nil, errors.Wrap(err, "handle bundle") } + responses.State = state // Process each operation in the bundle sequentially for i, op := range 
bundle.Requests { @@ -787,7 +788,7 @@ func (e *ComputeExecutionEngine) handleBundle( movingAddress, op, true, - state, + responses.State, ) if err != nil { return nil, errors.Wrapf(err, "handle bundle: operation %d failed", i) @@ -807,7 +808,7 @@ func (e *ComputeExecutionEngine) handleBundle( // Collect responses responses.Messages = append(responses.Messages, opResponses.Messages...) - responses.State = state + responses.State = opResponses.State } e.logger.Info( diff --git a/node/execution/engines/global_execution_engine.go b/node/execution/engines/global_execution_engine.go index 46aea06..39e0f77 100644 --- a/node/execution/engines/global_execution_engine.go +++ b/node/execution/engines/global_execution_engine.go @@ -346,6 +346,7 @@ func (e *GlobalExecutionEngine) handleBundle( } responses := &execution.ProcessMessageResult{} + responses.State = state // Process each operation in the bundle sequentially for i, op := range bundle.Requests { @@ -356,7 +357,7 @@ func (e *GlobalExecutionEngine) handleBundle( address, op, true, - state, + responses.State, ) if err != nil { // Skip non-global operations (e.g., token payments, compute ops) @@ -370,7 +371,7 @@ func (e *GlobalExecutionEngine) handleBundle( // Collect responses responses.Messages = append(responses.Messages, opResponses.Messages...) 
- responses.State = state + responses.State = opResponses.State } e.logger.Info( diff --git a/node/execution/engines/hypergraph_execution_engine.go b/node/execution/engines/hypergraph_execution_engine.go index c46ded4..6dbbc3d 100644 --- a/node/execution/engines/hypergraph_execution_engine.go +++ b/node/execution/engines/hypergraph_execution_engine.go @@ -512,6 +512,7 @@ func (e *HypergraphExecutionEngine) handleBundle( return nil, errors.Wrap(err, "handle bundle") } + responses.State = state // Process each operation in the bundle sequentially for i, op := range bundle.Requests { e.logger.Debug( @@ -564,7 +565,7 @@ func (e *HypergraphExecutionEngine) handleBundle( address, op, true, - state, + responses.State, ) if err != nil { return nil, errors.Wrapf(err, "handle bundle: operation %d failed", i) @@ -572,7 +573,7 @@ func (e *HypergraphExecutionEngine) handleBundle( // Collect responses responses.Messages = append(responses.Messages, opResponses.Messages...) - responses.State = state + responses.State = opResponses.State } e.logger.Info( diff --git a/node/execution/engines/token_execution_engine.go b/node/execution/engines/token_execution_engine.go index 7099578..3943b92 100644 --- a/node/execution/engines/token_execution_engine.go +++ b/node/execution/engines/token_execution_engine.go @@ -578,6 +578,7 @@ func (e *TokenExecutionEngine) handleBundle( } responses := &execution.ProcessMessageResult{} + responses.State = state // Process each operation in the bundle sequentially for i, op := range bundle.Requests { @@ -630,7 +631,7 @@ func (e *TokenExecutionEngine) handleBundle( address, op, true, - state, + responses.State, ) if err != nil { return nil, errors.Wrapf(err, "handle bundle: operation %d failed", i) @@ -638,7 +639,7 @@ func (e *TokenExecutionEngine) handleBundle( // Collect responses responses.Messages = append(responses.Messages, opResponses.Messages...) 
- responses.State = state + responses.State = opResponses.State } e.logger.Info( diff --git a/node/keyedaggregator/aggregator.go b/node/keyedaggregator/aggregator.go new file mode 100644 index 0000000..fdcc9ce --- /dev/null +++ b/node/keyedaggregator/aggregator.go @@ -0,0 +1,266 @@ +package keyedaggregator + +import ( + "context" + "errors" + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/counters" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +const ( + defaultWorkerCount = 4 + defaultQueueSize = 1000 +) + +// SequenceExtractor returns the sequence identifier for a given item. The +// sequence is typically the logical round/rank/height that an item belongs to. +type SequenceExtractor[ItemT any] func(*ItemT) uint64 + +// SequencedAggregator is a generic event dispatcher that fans out sequenced +// items to lazily-created collectors keyed by the item's sequence. Items are +// processed asynchronously by worker goroutines. The aggregator drops stale +// items (items whose sequence is below the currently retained threshold) and +// relies on the CollectorCache implementation to prune old collectors. +type SequencedAggregator[ItemT any] struct { + *lifecycle.ComponentManager + + tracer consensus.TraceLogger + lowestRetained counters.StrictMonotonicCounter + collectors CollectorCache[ItemT] + sequenceExtractor SequenceExtractor[ItemT] + queuedItems chan *ItemT + itemsNotifier chan struct{} + sequenceNotifier chan struct{} + wg sync.WaitGroup + workerCount int + queueCapacity int +} + +// AggregatorOption customizes the behaviour of the SequencedAggregator. +type AggregatorOption func(*aggregatorConfig) + +type aggregatorConfig struct { + workerCount int + queueCapacity int +} + +// WithWorkerCount overrides the default number of worker goroutines used to +// drain the inbound queue. Values smaller than one are ignored. 
+func WithWorkerCount(count int) AggregatorOption { + return func(cfg *aggregatorConfig) { + if count > 0 { + cfg.workerCount = count + } + } +} + +// WithQueueCapacity overrides the size of the buffered queue that stores +// pending items. Values smaller than one are ignored. +func WithQueueCapacity(capacity int) AggregatorOption { + return func(cfg *aggregatorConfig) { + if capacity > 0 { + cfg.queueCapacity = capacity + } + } +} + +// NewSequencedAggregator wires a SequencedAggregator using the provided +// CollectorCache and SequenceExtractor. The aggregator starts workers via the +// lifecycle.ComponentManager built during construction. +func NewSequencedAggregator[ItemT any]( + tracer consensus.TraceLogger, + lowestRetained uint64, + collectors CollectorCache[ItemT], + extractor SequenceExtractor[ItemT], + opts ...AggregatorOption, +) (*SequencedAggregator[ItemT], error) { + if collectors == nil { + return nil, fmt.Errorf("collector cache is required") + } + if extractor == nil { + return nil, fmt.Errorf("sequence extractor is required") + } + + cfg := aggregatorConfig{ + workerCount: defaultWorkerCount, + queueCapacity: defaultQueueSize, + } + for _, opt := range opts { + if opt != nil { + opt(&cfg) + } + } + if cfg.workerCount <= 0 { + cfg.workerCount = defaultWorkerCount + } + if cfg.queueCapacity <= 0 { + cfg.queueCapacity = defaultQueueSize + } + + aggregator := &SequencedAggregator[ItemT]{ + tracer: tracer, + lowestRetained: counters.NewMonotonicCounter(lowestRetained), + collectors: collectors, + sequenceExtractor: extractor, + queuedItems: make(chan *ItemT, cfg.queueCapacity), + itemsNotifier: make(chan struct{}, 1), + sequenceNotifier: make(chan struct{}, 1), + workerCount: cfg.workerCount, + queueCapacity: cfg.queueCapacity, + } + + aggregator.wg.Add(aggregator.workerCount + 1) + builder := lifecycle.NewComponentManagerBuilder() + for i := 0; i < aggregator.workerCount; i++ { + builder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready 
lifecycle.ReadyFunc, + ) { + ready() + aggregator.queuedItemsProcessingLoop(ctx) + }) + } + builder.AddWorker(func( + ctx lifecycle.SignalerContext, + ready lifecycle.ReadyFunc, + ) { + ready() + aggregator.sequenceProcessingLoop(ctx) + }) + + aggregator.ComponentManager = builder.Build() + return aggregator, nil +} + +// Add enqueues an item for asynchronous processing. Items whose sequence is +// below the retained threshold are silently discarded. +func (a *SequencedAggregator[ItemT]) Add(item *ItemT) { + if item == nil { + return + } + sequence := a.sequenceExtractor(item) + if sequence < a.lowestRetained.Value() { + a.tracer.Trace( + "dropping item added below lowest retained value", + consensus.Uint64Param("lowest_retained", a.lowestRetained.Value()), + consensus.Uint64Param("sequence", sequence), + ) + return + } + + select { + case a.queuedItems <- item: + select { + case a.itemsNotifier <- struct{}{}: + default: + } + default: + a.tracer.Trace("dropping sequenced item: queue at capacity") + } +} + +// PruneUpToSequence prunes all collectors with sequence lower than the provided +// threshold. If the provided threshold is behind the current value, this call +// is treated as a no-op. +func (a *SequencedAggregator[ItemT]) PruneUpToSequence(sequence uint64) { + a.collectors.PruneUpToSequence(sequence) +} + +// OnSequenceChange notifies the aggregator that the active sequence advanced. +// When the internal counter is updated the pruning worker is notified to prune +// the collector cache. 
+func (a *SequencedAggregator[ItemT]) OnSequenceChange(oldSeq, newSeq uint64) { + if a.lowestRetained.Set(newSeq) { + select { + case a.sequenceNotifier <- struct{}{}: + default: + } + } +} + +func (a *SequencedAggregator[ItemT]) queuedItemsProcessingLoop( + ctx lifecycle.SignalerContext, +) { + defer a.wg.Done() + for { + select { + case <-ctx.Done(): + return + case <-a.itemsNotifier: + a.tracer.Trace("processing queued sequenced items") + if err := a.processQueuedItems(ctx); err != nil { + ctx.Throw(fmt.Errorf("processing queued items failed: %w", err)) + return + } + } + } +} + +func (a *SequencedAggregator[ItemT]) processQueuedItems( + ctx context.Context, +) error { + for { + select { + case <-ctx.Done(): + return nil + case item, ok := <-a.queuedItems: + if !ok { + return nil + } + if item == nil { + continue + } + if err := a.processQueuedItem(item); err != nil { + return err + } + a.tracer.Trace("sequenced item processed successfully") + default: + return nil + } + } +} + +func (a *SequencedAggregator[ItemT]) processQueuedItem(item *ItemT) error { + sequence := a.sequenceExtractor(item) + collector, _, err := a.collectors.GetOrCreateCollector(sequence) + if err != nil { + switch { + case errors.Is(err, ErrSequenceUnknown): + a.tracer.Error("dropping item for unknown sequence", err) + return nil + case errors.Is(err, ErrSequenceBelowRetention): + return nil + default: + return fmt.Errorf("could not get collector for sequence %d: %w", + sequence, + err, + ) + } + } + + if err := collector.Add(item); err != nil { + return fmt.Errorf("collector processing failed for sequence %d: %w", + sequence, + err, + ) + } + return nil +} + +func (a *SequencedAggregator[ItemT]) sequenceProcessingLoop( + ctx context.Context, +) { + defer a.wg.Done() + for { + select { + case <-ctx.Done(): + return + case <-a.sequenceNotifier: + a.PruneUpToSequence(a.lowestRetained.Value()) + } + } +} diff --git a/node/keyedaggregator/aggregator_test.go 
b/node/keyedaggregator/aggregator_test.go new file mode 100644 index 0000000..6ae2c0b --- /dev/null +++ b/node/keyedaggregator/aggregator_test.go @@ -0,0 +1,201 @@ +package keyedaggregator + +import ( + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" +) + +type testItem struct { + sequence uint64 + value string +} + +type stubCollector struct { + mu sync.Mutex + items []*testItem +} + +func (c *stubCollector) Add(item *testItem) error { + c.mu.Lock() + defer c.mu.Unlock() + c.items = append(c.items, item) + return nil +} + +func (c *stubCollector) Records() []*testItem { + c.mu.Lock() + defer c.mu.Unlock() + out := make([]*testItem, len(c.items)) + copy(out, c.items) + return out +} + +func (c *stubCollector) Remove(item *testItem) { + c.mu.Lock() + defer c.mu.Unlock() + for i, existing := range c.items { + if existing == item { + c.items = append(c.items[:i], c.items[i+1:]...) 
+ break + } + } +} + +type trackingFactory struct { + mu sync.Mutex + collectors map[uint64]*stubCollector +} + +func newTrackingFactory() *trackingFactory { + return &trackingFactory{ + collectors: make(map[uint64]*stubCollector), + } +} + +func (f *trackingFactory) Create( + sequence uint64, +) (Collector[testItem], error) { + f.mu.Lock() + defer f.mu.Unlock() + collector := &stubCollector{} + f.collectors[sequence] = collector + return collector, nil +} + +func (f *trackingFactory) collector(sequence uint64) *stubCollector { + f.mu.Lock() + defer f.mu.Unlock() + return f.collectors[sequence] +} + +type noopTracer struct{} + +func (noopTracer) Trace(string, ...consensus.LogParam) {} +func (noopTracer) Error(string, error, ...consensus.LogParam) {} +func (noopTracer) With(...consensus.LogParam) consensus.TraceLogger { return noopTracer{} } + +func startTestAggregator( + t *testing.T, + aggregator *SequencedAggregator[testItem], +) func() { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + signalCtx, _ := lifecycle.WithSignaler(ctx) + require.NoError(t, aggregator.ComponentManager.Start(signalCtx)) + <-aggregator.ComponentManager.Ready() + return func() { + cancel() + <-aggregator.ComponentManager.Done() + } +} + +func TestSequencedAggregatorDispatchesItems(t *testing.T) { + t.Parallel() + factory := newTrackingFactory() + collectors := NewSequencedCollectors[testItem](noopTracer{}, 0, factory) + aggregator, err := NewSequencedAggregator[testItem]( + noopTracer{}, + 0, + collectors, + func(item *testItem) uint64 { return item.sequence }, + ) + require.NoError(t, err) + stop := startTestAggregator(t, aggregator) + defer stop() + + expected := &testItem{sequence: 2, value: "payload"} + aggregator.Add(expected) + + require.Eventually(t, func() bool { + c := factory.collector(2) + if c == nil { + return false + } + items := c.Records() + return len(items) == 1 && items[0] == expected + }, time.Second, 10*time.Millisecond) +} + +func 
TestSequencedAggregatorDropsStaleItems(t *testing.T) { + t.Parallel() + factory := newTrackingFactory() + collectors := NewSequencedCollectors[testItem](noopTracer{}, 5, factory) + aggregator, err := NewSequencedAggregator[testItem]( + noopTracer{}, + 5, + collectors, + func(item *testItem) uint64 { return item.sequence }, + ) + require.NoError(t, err) + stop := startTestAggregator(t, aggregator) + defer stop() + + aggregator.Add(&testItem{sequence: 2}) + + // Item is dropped before it ever enters the queue, so no collector + // should have been created for the stale sequence. + time.Sleep(50 * time.Millisecond) + require.Nil(t, factory.collector(2)) +} + +func TestSequencedAggregatorPrunesCollectorsOnSequenceChange(t *testing.T) { + t.Parallel() + factory := newTrackingFactory() + collectors := NewSequencedCollectors[testItem](noopTracer{}, 0, factory) + aggregator, err := NewSequencedAggregator[testItem]( + noopTracer{}, + 0, + collectors, + func(item *testItem) uint64 { return item.sequence }, + ) + require.NoError(t, err) + stop := startTestAggregator(t, aggregator) + defer stop() + + aggregator.Add(&testItem{sequence: 1}) + require.Eventually(t, func() bool { + _, found, err := collectors.getCollector(1) + return err == nil && found + }, time.Second, 10*time.Millisecond) + + aggregator.OnSequenceChange(0, 3) + require.Eventually(t, func() bool { + _, _, err := collectors.getCollector(1) + return errors.Is(err, ErrSequenceBelowRetention) + }, time.Second, 10*time.Millisecond) +} + +func TestSequencedCollectorsGetOrCreateReusesInstances(t *testing.T) { + t.Parallel() + collectors := NewSequencedCollectors[testItem](noopTracer{}, 0, newTrackingFactory()) + + first, created, err := collectors.GetOrCreateCollector(4) + require.NoError(t, err) + require.True(t, created) + + second, created, err := collectors.GetOrCreateCollector(4) + require.NoError(t, err) + require.False(t, created) + require.Equal(t, first, second) +} + +func 
TestSequencedCollectorsPruneRemovesOldSequences(t *testing.T) { + t.Parallel() + collectors := NewSequencedCollectors[testItem](noopTracer{}, 0, newTrackingFactory()) + + _, _, err := collectors.GetOrCreateCollector(2) + require.NoError(t, err) + + collectors.PruneUpToSequence(5) + + _, _, err = collectors.GetOrCreateCollector(2) + require.ErrorIs(t, err, ErrSequenceBelowRetention) +} diff --git a/node/keyedaggregator/collectors.go b/node/keyedaggregator/collectors.go new file mode 100644 index 0000000..4fa98e2 --- /dev/null +++ b/node/keyedaggregator/collectors.go @@ -0,0 +1,144 @@ +package keyedaggregator + +import ( + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" +) + +// Collector represents a per-sequence worker that processes items belonging to +// that sequence. +type Collector[ItemT any] interface { + Add(*ItemT) error + Records() []*ItemT + Remove(*ItemT) +} + +// CollectorFactory constructs collectors for a specific sequence. +type CollectorFactory[ItemT any] interface { + Create(sequence uint64) (Collector[ItemT], error) +} + +// CollectorCache provides lazy access to collectors keyed by sequence and +// pruning of stale sequences. +type CollectorCache[ItemT any] interface { + GetOrCreateCollector(sequence uint64) (Collector[ItemT], bool, error) + GetCollector(sequence uint64) (Collector[ItemT], bool, error) + PruneUpToSequence(sequence uint64) +} + +// SequencedCollectors is a threadsafe CollectorCache implementation that +// lazily instantiates collectors and prunes them when the retained sequence +// advances. +type SequencedCollectors[ItemT any] struct { + tracer consensus.TraceLogger + lock sync.RWMutex + lowestRetained uint64 + highestCached uint64 + collectors map[uint64]Collector[ItemT] + collectorFactory CollectorFactory[ItemT] +} + +// NewSequencedCollectors creates a SequencedCollectors backed by the provided +// factory. The lowestRetained sequence is kept even if pruning is invoked with +// smaller values. 
+func NewSequencedCollectors[ItemT any]( + tracer consensus.TraceLogger, + lowestRetained uint64, + factory CollectorFactory[ItemT], +) *SequencedCollectors[ItemT] { + if factory == nil { + panic("collector factory is required") + } + return &SequencedCollectors[ItemT]{ + tracer: tracer, + lowestRetained: lowestRetained, + highestCached: lowestRetained, + collectors: make(map[uint64]Collector[ItemT]), + collectorFactory: factory, + } +} + +// GetOrCreateCollector retrieves the collector for the provided sequence. If no +// collector exists, one is created using the factory. +func (c *SequencedCollectors[ItemT]) GetOrCreateCollector( + sequence uint64, +) (Collector[ItemT], bool, error) { + cached, found, err := c.getCollector(sequence) + if err != nil { + return nil, false, err + } + if found { + return cached, false, nil + } + + col, err := c.collectorFactory.Create(sequence) + if err != nil { + return nil, false, fmt.Errorf( + "could not create collector for sequence %d: %w", + sequence, + err, + ) + } + + c.lock.Lock() + defer c.lock.Unlock() + + if existing, ok := c.collectors[sequence]; ok { + return existing, false, nil + } + c.collectors[sequence] = col + if c.highestCached < sequence { + c.highestCached = sequence + } + return col, true, nil +} + +func (c *SequencedCollectors[ItemT]) getCollector( + sequence uint64, +) (Collector[ItemT], bool, error) { + c.lock.RLock() + defer c.lock.RUnlock() + if sequence < c.lowestRetained { + return nil, false, ErrSequenceBelowRetention + } + col, found := c.collectors[sequence] + return col, found, nil +} + +// GetCollector retrieves a collector for the provided sequence without creating +// a new one. +func (c *SequencedCollectors[ItemT]) GetCollector( + sequence uint64, +) (Collector[ItemT], bool, error) { + return c.getCollector(sequence) +} + +// PruneUpToSequence removes collectors whose sequence is below the provided +// threshold. 
+func (c *SequencedCollectors[ItemT]) PruneUpToSequence(sequence uint64) { + c.lock.Lock() + defer c.lock.Unlock() + if c.lowestRetained >= sequence { + return + } + before := len(c.collectors) + if before == 0 { + c.lowestRetained = sequence + return + } + + if uint64(before) < sequence-c.lowestRetained { + for seq := range c.collectors { + if seq < sequence { + delete(c.collectors, seq) + } + } + } else { + for seq := c.lowestRetained; seq < sequence; seq++ { + delete(c.collectors, seq) + } + } + c.lowestRetained = sequence +} diff --git a/node/keyedaggregator/errors.go b/node/keyedaggregator/errors.go new file mode 100644 index 0000000..e1765c0 --- /dev/null +++ b/node/keyedaggregator/errors.go @@ -0,0 +1,13 @@ +package keyedaggregator + +import "errors" + +var ( + // ErrSequenceBelowRetention indicates that a collector for the requested + // sequence can no longer be accessed because it was pruned. + ErrSequenceBelowRetention = errors.New("sequence below retention threshold") + + // ErrSequenceUnknown indicates that the requested sequence is not known to + // the collaborating components and should be dropped. + ErrSequenceUnknown = errors.New("unknown sequence") +) diff --git a/node/keyedcollector/cache.go b/node/keyedcollector/cache.go new file mode 100644 index 0000000..a24116a --- /dev/null +++ b/node/keyedcollector/cache.go @@ -0,0 +1,116 @@ +package keyedcollector + +import ( + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// RecordTraits specifies how to extract attributes from a record that are +// required by the collector infrastructure. 
+type RecordTraits[RecordT any] struct { + Sequence func(*RecordT) uint64 + Identity func(*RecordT) models.Identity + Equals func(*RecordT, *RecordT) bool +} + +func (t RecordTraits[RecordT]) validate() error { + switch { + case t.Sequence == nil: + return fmt.Errorf("sequence accessor is required") + case t.Identity == nil: + return fmt.Errorf("identity accessor is required") + case t.Equals == nil: + return fmt.Errorf("equality comparator is required") + default: + return nil + } +} + +// RecordCache stores the first record per identity for a particular sequence. +// Subsequent duplicates are ignored, while conflicting records produce a +// ConflictingRecordError. +type RecordCache[RecordT any] struct { + lock sync.RWMutex + sequence uint64 + entries map[models.Identity]*RecordT + traits RecordTraits[RecordT] +} + +func NewRecordCache[RecordT any]( + sequence uint64, + traits RecordTraits[RecordT], +) *RecordCache[RecordT] { + return &RecordCache[RecordT]{ + sequence: sequence, + entries: make(map[models.Identity]*RecordT), + traits: traits, + } +} + +func (c *RecordCache[RecordT]) Sequence() uint64 { return c.sequence } + +// Add stores the record in the cache, returning ErrRepeatedRecord when the +// record already exists (same identity and equal contents) and +// ConflictingRecordError when the record already exists but with different +// contents. When an error is returned the record is not stored. +func (c *RecordCache[RecordT]) Add(record *RecordT) error { + if c.traits.Sequence(record) != c.sequence { + return ErrRecordForDifferentSequence + } + + identity := c.traits.Identity(record) + + c.lock.Lock() + defer c.lock.Unlock() + + if existing, ok := c.entries[identity]; ok { + if c.traits.Equals(existing, record) { + return ErrRepeatedRecord + } + return NewConflictingRecordError(existing, record) + } + + c.entries[identity] = record + return nil +} + +// Get returns the stored record for the given identity. 
+func (c *RecordCache[RecordT]) Get( + identity models.Identity, +) (*RecordT, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + record, ok := c.entries[identity] + return record, ok +} + +// Size returns the number of cached records. +func (c *RecordCache[RecordT]) Size() int { + c.lock.RLock() + defer c.lock.RUnlock() + return len(c.entries) +} + +// All returns a snapshot of all cached records. +func (c *RecordCache[RecordT]) All() []*RecordT { + c.lock.RLock() + defer c.lock.RUnlock() + result := make([]*RecordT, 0, len(c.entries)) + for _, record := range c.entries { + result = append(result, record) + } + return result +} + +// Remove deletes the record from the cache. +func (c *RecordCache[RecordT]) Remove(record *RecordT) { + if record == nil { + return + } + identity := c.traits.Identity(record) + c.lock.Lock() + delete(c.entries, identity) + c.lock.Unlock() +} diff --git a/node/keyedcollector/collector.go b/node/keyedcollector/collector.go new file mode 100644 index 0000000..ac1904a --- /dev/null +++ b/node/keyedcollector/collector.go @@ -0,0 +1,111 @@ +package keyedcollector + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" +) + +// Processor handles validated records. Implementations are expected to be +// concurrency-safe. +type Processor[RecordT any] interface { + Process(record *RecordT) error +} + +// CollectorConsumer receives notifications about collector events, such as +// conflicting records or invalid records detected by the Processor. +type CollectorConsumer[RecordT any] interface { + OnRecordProcessed(record *RecordT) + OnConflictingRecords(first, second *RecordT) + OnInvalidRecord(err *InvalidRecordError[RecordT]) +} + +// Collector implements the record caching/deduplication flow for a single +// sequence. It stores the first record per identity, detects equivocations, and +// delegates valid records to the configured Processor. 
+type Collector[RecordT any] struct { + tracer consensus.TraceLogger + cache *RecordCache[RecordT] + processor Processor[RecordT] + consumer CollectorConsumer[RecordT] + traits RecordTraits[RecordT] +} + +func NewCollector[RecordT any]( + tracer consensus.TraceLogger, + sequence uint64, + traits RecordTraits[RecordT], + processor Processor[RecordT], + consumer CollectorConsumer[RecordT], +) (*Collector[RecordT], error) { + if err := traits.validate(); err != nil { + return nil, err + } + if processor == nil { + return nil, fmt.Errorf("processor is required") + } + cache := NewRecordCache(sequence, traits) + return &Collector[RecordT]{ + tracer: tracer, + cache: cache, + processor: processor, + consumer: consumer, + traits: traits, + }, nil +} + +func (c *Collector[RecordT]) Sequence() uint64 { + return c.cache.Sequence() +} + +// Add inserts the record into the cache and triggers processing. Duplicate +// records are ignored, conflicting records are surfaced to the consumer, and +// invalid records (as indicated by the Processor) are reported via +// CollectorConsumer.OnInvalidRecord. 
+func (c *Collector[RecordT]) Add(record *RecordT) error { + err := c.cache.Add(record) + if err != nil { + switch { + case errors.Is(err, ErrRepeatedRecord): + return nil + case errors.Is(err, ErrRecordForDifferentSequence): + return fmt.Errorf("record sequence mismatch: %w", err) + default: + var conflict *ConflictingRecordError[RecordT] + if errors.As(err, &conflict) { + if c.consumer != nil { + c.consumer.OnConflictingRecords(conflict.First(), conflict.Second()) + } + return nil + } + return fmt.Errorf("adding record to cache failed: %w", err) + } + } + + if err := c.processor.Process(record); err != nil { + if invalid, ok := AsInvalidRecordError[RecordT](err); ok { + c.tracer.Error("invalid record detected", err) + if c.consumer != nil { + c.consumer.OnInvalidRecord(invalid) + } + return nil + } + return fmt.Errorf("processing record failed: %w", err) + } + + if c.consumer != nil { + c.consumer.OnRecordProcessed(record) + } + return nil +} + +// Records returns a snapshot of all cached records. +func (c *Collector[RecordT]) Records() []*RecordT { + return c.cache.All() +} + +// Remove deletes the provided record from the cache. 
+func (c *Collector[RecordT]) Remove(record *RecordT) { + c.cache.Remove(record) +} diff --git a/node/keyedcollector/collector_test.go b/node/keyedcollector/collector_test.go new file mode 100644 index 0000000..1ce05a5 --- /dev/null +++ b/node/keyedcollector/collector_test.go @@ -0,0 +1,272 @@ +package keyedcollector + +import ( + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/lifecycle" + "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" +) + +type fakeRecord struct { + sequence uint64 + identity models.Identity + payload string +} + +func recordTraits() RecordTraits[fakeRecord] { + return RecordTraits[fakeRecord]{ + Sequence: func(r *fakeRecord) uint64 { return r.sequence }, + Identity: func(r *fakeRecord) models.Identity { return r.identity }, + Equals: func(a, b *fakeRecord) bool { + if a == nil || b == nil { + return a == b + } + return a.payload == b.payload + }, + } +} + +type noopProcessor struct { + mu sync.Mutex + records []*fakeRecord + err error +} + +func (p *noopProcessor) Process(record *fakeRecord) error { + p.mu.Lock() + defer p.mu.Unlock() + p.records = append(p.records, record) + return p.err +} + +type capturingConsumer struct { + mu sync.Mutex + processed []*fakeRecord + conflicts [][2]*fakeRecord + invalid []*InvalidRecordError[fakeRecord] +} + +func (c *capturingConsumer) OnRecordProcessed(record *fakeRecord) { + c.mu.Lock() + defer c.mu.Unlock() + c.processed = append(c.processed, record) +} + +func (c *capturingConsumer) OnConflictingRecords(first, second *fakeRecord) { + c.mu.Lock() + defer c.mu.Unlock() + c.conflicts = append(c.conflicts, [2]*fakeRecord{first, second}) +} + +func (c *capturingConsumer) OnInvalidRecord(err *InvalidRecordError[fakeRecord]) { + c.mu.Lock() + defer c.mu.Unlock() + c.invalid = 
append(c.invalid, err) +} + +type noopTracer struct{} + +func (noopTracer) Trace(string, ...consensus.LogParam) {} +func (noopTracer) Error(string, error, ...consensus.LogParam) {} +func (noopTracer) With(...consensus.LogParam) consensus.TraceLogger { return noopTracer{} } + +func TestCollectorProcessesRecord(t *testing.T) { + t.Parallel() + processor := &noopProcessor{} + consumer := &capturingConsumer{} + collector, err := NewCollector[fakeRecord]( + noopTracer{}, + 1, + recordTraits(), + processor, + consumer, + ) + require.NoError(t, err) + + record := &fakeRecord{sequence: 1, identity: "id", payload: "a"} + require.NoError(t, collector.Add(record)) + + require.Len(t, consumer.processed, 1) + require.Equal(t, record, consumer.processed[0]) + require.Len(t, processor.records, 1) + require.Equal(t, record, processor.records[0]) +} + +func TestCollectorIgnoresDuplicates(t *testing.T) { + t.Parallel() + processor := &noopProcessor{} + collector, err := NewCollector[fakeRecord]( + noopTracer{}, + 1, + recordTraits(), + processor, + nil, + ) + require.NoError(t, err) + + record := &fakeRecord{sequence: 1, identity: "id", payload: "a"} + require.NoError(t, collector.Add(record)) + require.NoError(t, collector.Add(&fakeRecord{sequence: 1, identity: "id", payload: "a"})) + require.Len(t, processor.records, 1) +} + +func TestCollectorNotifiesConflicts(t *testing.T) { + t.Parallel() + processor := &noopProcessor{} + consumer := &capturingConsumer{} + collector, err := NewCollector[fakeRecord]( + noopTracer{}, + 1, + recordTraits(), + processor, + consumer, + ) + require.NoError(t, err) + + require.NoError(t, collector.Add(&fakeRecord{sequence: 1, identity: "id", payload: "a"})) + require.NoError(t, collector.Add(&fakeRecord{sequence: 1, identity: "id", payload: "b"})) + + require.Len(t, consumer.conflicts, 1) + require.Equal(t, "a", consumer.conflicts[0][0].payload) + require.Equal(t, "b", consumer.conflicts[0][1].payload) + require.Len(t, processor.records, 1) +} + +func 
TestCollectorHandlesInvalidRecords(t *testing.T) { + t.Parallel() + invalid := NewInvalidRecordError(&fakeRecord{sequence: 1}, errors.New("boom")) + processor := &noopProcessor{err: invalid} + consumer := &capturingConsumer{} + collector, err := NewCollector[fakeRecord]( + noopTracer{}, + 1, + recordTraits(), + processor, + consumer, + ) + require.NoError(t, err) + + require.NoError(t, collector.Add(&fakeRecord{sequence: 1, identity: "id"})) + require.Len(t, consumer.invalid, 1) +} + +func TestCollectorPropagatesProcessorErrors(t *testing.T) { + t.Parallel() + processor := &noopProcessor{err: errors.New("fatal")} + collector, err := NewCollector[fakeRecord]( + noopTracer{}, + 1, + recordTraits(), + processor, + nil, + ) + require.NoError(t, err) + + err = collector.Add(&fakeRecord{sequence: 1, identity: "id"}) + require.Error(t, err) + require.ErrorContains(t, err, "processing record failed") +} + +func TestCollectorRejectsIncompatibleSequence(t *testing.T) { + t.Parallel() + processor := &noopProcessor{} + collector, err := NewCollector[fakeRecord]( + noopTracer{}, + 2, + recordTraits(), + processor, + nil, + ) + require.NoError(t, err) + + err = collector.Add(&fakeRecord{sequence: 1, identity: "id"}) + require.Error(t, err) + require.ErrorIs(t, err, ErrRecordForDifferentSequence) +} + +type mockProcessorFactory struct { + mu sync.Mutex + sequences []uint64 + processor Processor[fakeRecord] + err error +} + +func (f *mockProcessorFactory) Create(sequence uint64) (Processor[fakeRecord], error) { + f.mu.Lock() + defer f.mu.Unlock() + if f.err != nil { + return nil, f.err + } + f.sequences = append(f.sequences, sequence) + if f.processor != nil { + return f.processor, nil + } + return &noopProcessor{}, nil +} + +func TestFactoryCreatesCollector(t *testing.T) { + t.Parallel() + processorFactory := &mockProcessorFactory{} + factory, err := NewFactory[fakeRecord]( + noopTracer{}, + recordTraits(), + nil, + processorFactory, + ) + require.NoError(t, err) + + 
collectorIface, err := factory.Create(3) + require.NoError(t, err) + require.NotNil(t, collectorIface) + require.Len(t, processorFactory.sequences, 1) + require.Equal(t, uint64(3), processorFactory.sequences[0]) +} + +func TestFactorySatisfiesKeyedAggregatorInterface(t *testing.T) { + t.Parallel() + processorFactory := &mockProcessorFactory{} + factory, err := NewFactory[fakeRecord]( + noopTracer{}, + recordTraits(), + nil, + processorFactory, + ) + require.NoError(t, err) + + collectors := keyedaggregator.NewSequencedCollectors[fakeRecord]( + noopTracer{}, + 0, + factory, + ) + aggregator, err := keyedaggregator.NewSequencedAggregator( + noopTracer{}, + 0, + collectors, + func(r *fakeRecord) uint64 { return r.sequence }, + ) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + signalCtx, _ := lifecycle.WithSignaler(ctx) + require.NoError(t, aggregator.ComponentManager.Start(signalCtx)) + <-aggregator.ComponentManager.Ready() + + record := &fakeRecord{sequence: 0, identity: "id"} + aggregator.Add(record) + + require.Eventually(t, func() bool { + return len(processorFactory.sequences) == 1 + }, time.Second, 10*time.Millisecond) + + cancel() + <-aggregator.ComponentManager.Done() +} diff --git a/node/keyedcollector/errors.go b/node/keyedcollector/errors.go new file mode 100644 index 0000000..257e0e7 --- /dev/null +++ b/node/keyedcollector/errors.go @@ -0,0 +1,74 @@ +package keyedcollector + +import ( + "errors" + "fmt" +) + +var ( + // ErrRepeatedRecord indicates that a record from the same identity with the + // same content was added multiple times. + ErrRepeatedRecord = errors.New("duplicated record") + // ErrRecordForDifferentSequence indicates that a record belongs to a + // different sequence than the collector handles. 
+ ErrRecordForDifferentSequence = errors.New("record for incompatible sequence") +) + +// ConflictingRecordError is emitted when two records for the same sequence and +// identity contain different contents, signaling equivocation. +type ConflictingRecordError[RecordT any] struct { + first *RecordT + second *RecordT +} + +func NewConflictingRecordError[RecordT any]( + first *RecordT, + second *RecordT, +) *ConflictingRecordError[RecordT] { + return &ConflictingRecordError[RecordT]{first: first, second: second} +} + +func (e *ConflictingRecordError[RecordT]) Error() string { + return "conflicting records detected" +} + +func (e *ConflictingRecordError[RecordT]) First() *RecordT { return e.first } +func (e *ConflictingRecordError[RecordT]) Second() *RecordT { return e.second } + +// InvalidRecordError indicates that a record failed validation. Processor +// implementations should wrap contextual information in this error type to +// signal recoverable invalid inputs to the collector. +type InvalidRecordError[RecordT any] struct { + Record *RecordT + Cause error +} + +func NewInvalidRecordError[RecordT any]( + record *RecordT, + cause error, +) *InvalidRecordError[RecordT] { + return &InvalidRecordError[RecordT]{Record: record, Cause: cause} +} + +func (e *InvalidRecordError[RecordT]) Error() string { + if e.Cause == nil { + return "invalid record" + } + return fmt.Sprintf("invalid record: %v", e.Cause) +} + +func (e *InvalidRecordError[RecordT]) Unwrap() error { + return e.Cause +} + +// AsInvalidRecordError performs a typed errors.As lookup for +// InvalidRecordError[RecordT]. 
+func AsInvalidRecordError[RecordT any]( + err error, +) (*InvalidRecordError[RecordT], bool) { + var invalid *InvalidRecordError[RecordT] + if errors.As(err, &invalid) { + return invalid, true + } + return nil, false +} diff --git a/node/keyedcollector/factory.go b/node/keyedcollector/factory.go new file mode 100644 index 0000000..431ab20 --- /dev/null +++ b/node/keyedcollector/factory.go @@ -0,0 +1,58 @@ +package keyedcollector + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" +) + +// ProcessorFactory creates processors for a given sequence. +type ProcessorFactory[RecordT any] interface { + Create(sequence uint64) (Processor[RecordT], error) +} + +// Factory produces collectors for a given sequence, satisfying the +// keyedaggregator.CollectorFactory interface. +type Factory[RecordT any] struct { + tracer consensus.TraceLogger + traits RecordTraits[RecordT] + consumer CollectorConsumer[RecordT] + processorFactory ProcessorFactory[RecordT] +} + +func NewFactory[RecordT any]( + tracer consensus.TraceLogger, + traits RecordTraits[RecordT], + consumer CollectorConsumer[RecordT], + processorFactory ProcessorFactory[RecordT], +) (*Factory[RecordT], error) { + if err := traits.validate(); err != nil { + return nil, err + } + if processorFactory == nil { + return nil, fmt.Errorf("processor factory is required") + } + return &Factory[RecordT]{ + tracer: tracer, + traits: traits, + consumer: consumer, + processorFactory: processorFactory, + }, nil +} + +func (f *Factory[RecordT]) Create( + sequence uint64, +) (keyedaggregator.Collector[RecordT], error) { + processor, err := f.processorFactory.Create(sequence) + if err != nil { + return nil, fmt.Errorf("could not create processor for sequence %d: %w", sequence, err) + } + return NewCollector( + f.tracer, + sequence, + f.traits, + processor, + f.consumer, + ) +} diff --git a/node/p2p/blossomsub.go b/node/p2p/blossomsub.go index 
385b5c1..2bdb945 100644 --- a/node/p2p/blossomsub.go +++ b/node/p2p/blossomsub.go @@ -13,6 +13,7 @@ import ( "math/bits" "net" "runtime/debug" + "slices" "sync" "sync/atomic" "time" @@ -55,7 +56,7 @@ import ( ) const ( - DecayInterval = 10 * time.Second + DecayInterval = 10 * time.Minute AppDecay = .9 ) @@ -309,11 +310,11 @@ func NewBlossomSubWithHost( }, &blossomsub.PeerScoreThresholds{ SkipAtomicValidation: false, - GossipThreshold: -2000, - PublishThreshold: -5000, - GraylistThreshold: -10000, - AcceptPXThreshold: 1, - OpportunisticGraftThreshold: 2, + GossipThreshold: -500, + PublishThreshold: -1000, + GraylistThreshold: -2500, + AcceptPXThreshold: 1000, + OpportunisticGraftThreshold: 3.5, }, )) blossomOpts = append(blossomOpts, observability.WithPrometheusRawTracer()) @@ -711,32 +712,111 @@ func NewBlossomSub( if tracer != nil { blossomOpts = append(blossomOpts, blossomsub.WithEventTracer(tracer)) } - blossomOpts = append(blossomOpts, blossomsub.WithPeerScore( - &blossomsub.PeerScoreParams{ - SkipAtomicValidation: false, - BitmaskScoreCap: 0, - IPColocationFactorWeight: 0, - IPColocationFactorThreshold: 6, - BehaviourPenaltyWeight: -10, - BehaviourPenaltyThreshold: 100, - BehaviourPenaltyDecay: .5, - DecayInterval: DecayInterval, - DecayToZero: .1, - RetainScore: 60 * time.Minute, - AppSpecificScore: func(p peer.ID) float64 { - return float64(bs.GetPeerScore([]byte(p))) + + GLOBAL_CONSENSUS_BITMASK := []byte{0x00} + GLOBAL_FRAME_BITMASK := []byte{0x00, 0x00} + GLOBAL_PROVER_BITMASK := []byte{0x00, 0x00, 0x00} + GLOBAL_PEER_INFO_BITMASK := []byte{0x00, 0x00, 0x00, 0x00} + GLOBAL_ALERT_BITMASK := bytes.Repeat([]byte{0x00}, 16) + sets := getBitmaskSets(bytes.Repeat([]byte{0xff}, 32)) + sets = slices.Concat([][]byte{ + GLOBAL_CONSENSUS_BITMASK, + GLOBAL_FRAME_BITMASK, + GLOBAL_PROVER_BITMASK, + GLOBAL_PEER_INFO_BITMASK, + GLOBAL_ALERT_BITMASK, + }, sets) + bitmasksScoring := map[string]*blossomsub.BitmaskScoreParams{} + for _, set := range sets { + 
bitmasksScoring[string(set)] = &blossomsub.BitmaskScoreParams{ + SkipAtomicValidation: false, + BitmaskWeight: 0.1, + TimeInMeshWeight: 0.00027, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 1, + FirstMessageDeliveriesWeight: 5, + FirstMessageDeliveriesDecay: blossomsub.ScoreParameterDecay( + 10 * time.Minute, + ), + FirstMessageDeliveriesCap: 10000, + InvalidMessageDeliveriesWeight: -1000, + InvalidMessageDeliveriesDecay: blossomsub.ScoreParameterDecay(time.Hour), + } + } + + if p2pConfig.Network != 0 { + blossomOpts = append(blossomOpts, blossomsub.WithPeerScore( + &blossomsub.PeerScoreParams{ + SkipAtomicValidation: false, + Bitmasks: bitmasksScoring, + BitmaskScoreCap: 0, + IPColocationFactorWeight: 0, + IPColocationFactorThreshold: 6, + BehaviourPenaltyWeight: -10, + BehaviourPenaltyThreshold: 6, + BehaviourPenaltyDecay: .5, + DecayInterval: DecayInterval, + DecayToZero: .1, + RetainScore: 60 * time.Minute, + AppSpecificScore: func(p peer.ID) float64 { + return float64(bs.GetPeerScore([]byte(p))) + }, + AppSpecificWeight: 10.0, }, - AppSpecificWeight: 10.0, - }, - &blossomsub.PeerScoreThresholds{ - SkipAtomicValidation: false, - GossipThreshold: -2000, - PublishThreshold: -5000, - GraylistThreshold: -10000, - AcceptPXThreshold: 1, - OpportunisticGraftThreshold: 2, - }, - )) + &blossomsub.PeerScoreThresholds{ + SkipAtomicValidation: false, + GossipThreshold: -500, + PublishThreshold: -1000, + GraylistThreshold: -2500, + AcceptPXThreshold: 1000, + OpportunisticGraftThreshold: 3.5, + }, + )) + + } else { + whitelist := []*net.IPNet{} + for _, p := range directPeers { + for _, i := range p.Addrs { + ipnet, err := MultiaddrToIPNet(i) + if err != nil { + logger.Error( + "could not convert direct peer for ip colocation whitelist", + zap.String("peer_addr", i.String()), + zap.Error(err), + ) + } + whitelist = append(whitelist, ipnet) + } + } + blossomOpts = append(blossomOpts, blossomsub.WithPeerScore( + &blossomsub.PeerScoreParams{ + SkipAtomicValidation: false, 
+ Bitmasks: bitmasksScoring, + BitmaskScoreCap: 0, + IPColocationFactorWeight: -100, + IPColocationFactorThreshold: 6, + IPColocationFactorWhitelist: whitelist, + BehaviourPenaltyWeight: -10, + BehaviourPenaltyThreshold: 6, + BehaviourPenaltyDecay: .5, + DecayInterval: DecayInterval, + DecayToZero: .1, + RetainScore: 60 * time.Minute, + AppSpecificScore: func(p peer.ID) float64 { + return float64(bs.GetPeerScore([]byte(p))) + }, + AppSpecificWeight: 10.0, + }, + &blossomsub.PeerScoreThresholds{ + SkipAtomicValidation: false, + GossipThreshold: -500, + PublishThreshold: -1000, + GraylistThreshold: -2500, + AcceptPXThreshold: 1000, + OpportunisticGraftThreshold: 3.5, + }, + )) + } blossomOpts = append(blossomOpts, blossomsub.WithValidateQueueSize(p2pConfig.ValidateQueueSize), blossomsub.WithValidateWorkers(p2pConfig.ValidateWorkers), @@ -1611,3 +1691,83 @@ func getNetworkNamespace(network uint8) string { func (b *BlossomSub) Close() error { return nil } + +// MultiaddrToIPNet converts a multiaddr containing /ip4 or /ip6 +// into a *net.IPNet with a host mask (/32 or /128). +func MultiaddrToIPNet(m ma.Multiaddr) (*net.IPNet, error) { + var ( + ip net.IP + ipBits int + ) + + // Walk components and grab the first IP we see. 
+ ma.ForEach(m, func(c ma.Component, err error) bool { + if err != nil { + return false + } + switch c.Protocol().Code { + case ma.P_IP4: + if ip == nil { + ip = net.IP(c.RawValue()).To4() + ipBits = 32 + } + return false + + case ma.P_IP6: + if ip == nil { + ip = net.IP(c.RawValue()).To16() + ipBits = 128 + } + return false + } + return true + }) + + if ip == nil { + return nil, fmt.Errorf("multiaddr has no ip4/ip6 component: %s", m) + } + + mask := net.CIDRMask(ipBits, ipBits) + + return &net.IPNet{ + IP: ip.Mask(mask), + Mask: mask, + }, nil +} + +func getBitmaskSets(bitmask []byte) [][]byte { + sliced := [][]byte{} + if bytes.Equal(bitmask, make([]byte, len(bitmask))) { + sliced = append(sliced, bitmask) + } else { + for i, b := range bitmask { + if b == 0 { + continue + } + + // fast: one bit in byte + if b&(b-1) == 0 { + slice := make([]byte, len(bitmask)) + slice[i] = b + sliced = append(sliced, slice) + sliced = append(sliced, slices.Concat([]byte{0}, slice)) + sliced = append(sliced, slices.Concat([]byte{0, 0}, slice)) + sliced = append(sliced, slices.Concat([]byte{0, 0, 0}, slice)) + continue + } + + for j := 7; j >= 0; j-- { + if (b>>j)&1 == 1 { + slice := make([]byte, len(bitmask)) + slice[i] = 1 << j + sliced = append(sliced, slice) + sliced = append(sliced, slices.Concat([]byte{0}, slice)) + sliced = append(sliced, slices.Concat([]byte{0, 0}, slice)) + sliced = append(sliced, slices.Concat([]byte{0, 0, 0}, slice)) + } + } + } + } + + return sliced +} diff --git a/node/rpc/hypergraph_sync_rpc_server_test.go b/node/rpc/hypergraph_sync_rpc_server_test.go index f37e15d..d88fcd8 100644 --- a/node/rpc/hypergraph_sync_rpc_server_test.go +++ b/node/rpc/hypergraph_sync_rpc_server_test.go @@ -31,6 +31,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/protobufs" application "source.quilibrium.com/quilibrium/monorepo/types/hypergraph" "source.quilibrium.com/quilibrium/monorepo/types/tries" + crypto "source.quilibrium.com/quilibrium/monorepo/types/tries" 
"source.quilibrium.com/quilibrium/monorepo/verenc" ) @@ -58,7 +59,7 @@ func TestHypergraphSyncServer(t *testing.T) { data1 := enc.Encrypt(make([]byte, 20), pub) verenc1 := data1[0].Compress() vertices1 := make([]application.Vertex, numOperations) - dataTree1 := &tries.VectorCommitmentTree{} + dataTree1 := &crypto.VectorCommitmentTree{} logger, _ := zap.NewDevelopment() inclusionProver := bls48581.NewKZGInclusionProver(logger) for _, d := range []application.Encrypted{verenc1} { @@ -272,7 +273,7 @@ func TestHypergraphSyncServer(t *testing.T) { } time.Sleep(10 * time.Second) str.CloseSend() - leaves := tries.CompareLeaves( + leaves := crypto.CompareLeaves( crdts[0].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), crdts[1].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), ) @@ -297,7 +298,7 @@ func TestHypergraphSyncServer(t *testing.T) { crdts[0].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree().Commit(false), crdts[1].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree().Commit(false), ) { - leaves := tries.CompareLeaves( + leaves := crypto.CompareLeaves( crdts[0].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), crdts[1].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), ) @@ -330,7 +331,7 @@ func TestHypergraphPartialSync(t *testing.T) { data1 := enc.Encrypt(make([]byte, 20), pub) verenc1 := data1[0].Compress() vertices1 := make([]application.Vertex, numOperations) - dataTree1 := &tries.VectorCommitmentTree{} + dataTree1 := &crypto.VectorCommitmentTree{} logger, _ := zap.NewDevelopment() inclusionProver := bls48581.NewKZGInclusionProver(logger) domain := make([]byte, 32) @@ -627,7 +628,7 @@ func TestHypergraphPartialSync(t *testing.T) { log.Fatalf("Client: failed to sync 1: %v", err) } str.CloseSend() - leaves := tries.CompareLeaves( + leaves := crypto.CompareLeaves( crdts[0].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), 
crdts[1].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), ) @@ -750,7 +751,7 @@ func TestHypergraphPartialSync(t *testing.T) { crdts[0].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree().Commit(false), crdts[1].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree().Commit(false), ) { - leaves := tries.CompareLeaves( + leaves := crypto.CompareLeaves( crdts[0].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), crdts[1].(*hgcrdt.HypergraphCRDT).GetVertexAddsSet(shardKey).GetTree(), ) diff --git a/node/rpc/node_rpc_server.go b/node/rpc/node_rpc_server.go index 90e8858..1908c45 100644 --- a/node/rpc/node_rpc_server.go +++ b/node/rpc/node_rpc_server.go @@ -214,6 +214,7 @@ func (r *RPCServer) GetNodeInfo( PeerId: peer.ID(peerID).String(), PeerScore: uint64(r.pubSub.GetPeerScore(peerID)), Version: append([]byte{}, config.GetVersion()...), + PatchVersion: []byte{config.GetPatchNumber()}, PeerSeniority: seniority.FillBytes(make([]byte, 8)), RunningWorkers: uint32(len(workers)), AllocatedWorkers: allocated, diff --git a/node/worker/manager.go b/node/worker/manager.go index 5e6d24e..4b69a25 100644 --- a/node/worker/manager.go +++ b/node/worker/manager.go @@ -416,18 +416,7 @@ func (w *WorkerManager) AllocateWorker(coreId uint, filter []byte) error { w.setWorkerAllocation(coreId, true) // Refresh worker - svc, err := w.getIPCOfWorker(coreId) - if err != nil { - w.logger.Error("could not get ipc of worker", zap.Error(err)) - return errors.Wrap(err, "allocate worker") - } - - ctx := w.currentContext() - _, err = svc.Respawn(ctx, &protobufs.RespawnRequest{ - Filter: worker.Filter, - }) - if err != nil { - w.logger.Error("could not respawn worker", zap.Error(err)) + if err := w.respawnWorker(coreId, worker.Filter); err != nil { return errors.Wrap(err, "allocate worker") } @@ -496,18 +485,7 @@ func (w *WorkerManager) DeallocateWorker(coreId uint) error { } // Refresh worker - svc, err := w.getIPCOfWorker(coreId) - if err != nil { - 
w.logger.Error("could not get ipc of worker", zap.Error(err)) - return errors.Wrap(err, "allocate worker") - } - - ctx := w.currentContext() - _, err = svc.Respawn(ctx, &protobufs.RespawnRequest{ - Filter: []byte{}, - }) - if err != nil { - w.logger.Error("could not respawn worker", zap.Error(err)) + if err := w.respawnWorker(coreId, []byte{}); err != nil { return errors.Wrap(err, "allocate worker") } @@ -630,6 +608,34 @@ func (w *WorkerManager) RangeWorkers() ([]*typesStore.WorkerInfo, error) { return workers, nil } +const workerConnectivityTimeout = 5 * time.Second + +func (w *WorkerManager) CheckWorkersConnected() ([]uint, error) { + workers, err := w.store.RangeWorkers() + if err != nil { + return nil, errors.Wrap(err, "check worker connectivity") + } + + unreachable := make([]uint, 0) + for _, worker := range workers { + _, err := w.getIPCOfWorkerWithTimeout( + worker.CoreId, + workerConnectivityTimeout, + ) + if err != nil { + w.logger.Debug( + "worker unreachable during connectivity check", + zap.Uint("core_id", worker.CoreId), + zap.Error(err), + ) + w.closeServiceClient(worker.CoreId) + unreachable = append(unreachable, worker.CoreId) + } + } + + return unreachable, nil +} + // ProposeAllocations invokes a proposal function set by the parent of the // manager. 
func (w *WorkerManager) ProposeAllocations( @@ -737,18 +743,12 @@ func (w *WorkerManager) loadWorkersFromStore() error { w.setWorkerAllocation(worker.CoreId, false) } totalStorage += uint64(worker.TotalStorage) - svc, err := w.getIPCOfWorker(worker.CoreId) - if err != nil { - w.logger.Error("could not obtain IPC for worker", zap.Error(err)) - continue - } - - ctx := w.currentContext() - _, err = svc.Respawn(ctx, &protobufs.RespawnRequest{ - Filter: worker.Filter, - }) - if err != nil { - w.logger.Error("could not respawn worker", zap.Error(err)) + if err := w.respawnWorker(worker.CoreId, worker.Filter); err != nil { + w.logger.Error( + "could not respawn worker", + zap.Uint("core_id", worker.CoreId), + zap.Error(err), + ) continue } } @@ -800,21 +800,84 @@ func (w *WorkerManager) getP2PMultiaddrOfWorker(coreId uint) ( return ma, errors.Wrap(err, "get p2p multiaddr of worker") } +func (w *WorkerManager) ensureWorkerRegistered( + coreId uint, + p2pAddr multiaddr.Multiaddr, + streamAddr multiaddr.Multiaddr, +) error { + _, err := w.store.GetWorker(coreId) + if err == nil { + return nil + } + if err != nil && !errors.Is(err, store.ErrNotFound) { + return err + } + + return w.registerWorker(&typesStore.WorkerInfo{ + CoreId: coreId, + ListenMultiaddr: p2pAddr.String(), + StreamListenMultiaddr: streamAddr.String(), + Filter: nil, + TotalStorage: 0, + Automatic: len(w.config.Engine.DataWorkerP2PMultiaddrs) == 0, + Allocated: false, + }) +} + func (w *WorkerManager) getIPCOfWorker(coreId uint) ( protobufs.DataIPCServiceClient, error, +) { + ctx := w.currentContext() + if ctx == nil { + ctx = context.Background() + } + return w.getIPCOfWorkerWithContext(ctx, coreId) +} + +func (w *WorkerManager) getIPCOfWorkerWithTimeout( + coreId uint, + timeout time.Duration, +) (protobufs.DataIPCServiceClient, error) { + ctx := w.currentContext() + if ctx == nil { + ctx = context.Background() + } + if timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 
timeout) + defer cancel() + } + return w.getIPCOfWorkerWithContext(ctx, coreId) +} + +func (w *WorkerManager) getIPCOfWorkerWithContext( + ctx context.Context, + coreId uint, +) ( + protobufs.DataIPCServiceClient, + error, ) { if client, ok := w.getServiceClient(coreId); ok { return protobufs.NewDataIPCServiceClient(client), nil } w.logger.Info("reconnecting to worker", zap.Uint("core_id", coreId)) - addr, err := w.getMultiaddrOfWorker(coreId) + streamAddr, err := w.getMultiaddrOfWorker(coreId) if err != nil { return nil, errors.Wrap(err, "get ipc of worker") } - mga, err := mn.ToNetAddr(addr) + p2pAddr, err := w.getP2PMultiaddrOfWorker(coreId) + if err != nil { + return nil, errors.Wrap(err, "get ipc of worker") + } + + if err := w.ensureWorkerRegistered(coreId, p2pAddr, streamAddr); err != nil { + return nil, errors.Wrap(err, "get ipc of worker") + } + + mga, err := mn.ToNetAddr(streamAddr) if err != nil { return nil, errors.Wrap(err, "get ipc of worker") } @@ -825,25 +888,6 @@ func (w *WorkerManager) getIPCOfWorker(coreId uint) ( return nil, errors.Wrap(err, "get ipc of worker") } - if !w.hasWorkerFilter(coreId) { - p2pAddr, err := w.getP2PMultiaddrOfWorker(coreId) - if err != nil { - return nil, errors.Wrap(err, "get ipc of worker") - } - err = w.registerWorker(&typesStore.WorkerInfo{ - CoreId: coreId, - ListenMultiaddr: p2pAddr.String(), - StreamListenMultiaddr: addr.String(), - Filter: nil, - TotalStorage: 0, - Automatic: len(w.config.Engine.DataWorkerP2PMultiaddrs) == 0, - Allocated: false, - }) - if err != nil { - return nil, errors.Wrap(err, "get ipc of worker") - } - } - privKey, err := crypto.UnmarshalEd448PrivateKey(peerPrivKey) if err != nil { w.logger.Error("error unmarshaling peerkey", zap.Error(err)) @@ -874,16 +918,120 @@ func (w *WorkerManager) getIPCOfWorker(coreId uint) ( return nil, errors.Wrap(err, "get ipc of worker") } - client, err := grpc.NewClient( + return w.dialWorkerWithRetry( + ctx, + coreId, mga.String(), 
grpc.WithTransportCredentials(creds), ) - if err != nil { - return nil, errors.Wrap(err, "get ipc of worker") +} + +func (w *WorkerManager) dialWorkerWithRetry( + ctx context.Context, + coreId uint, + target string, + opts ...grpc.DialOption, +) (protobufs.DataIPCServiceClient, error) { + if ctx == nil { + ctx = context.Background() } - w.setServiceClient(coreId, client) - return protobufs.NewDataIPCServiceClient(client), nil + const ( + initialBackoff = 50 * time.Millisecond + maxBackoff = 5 * time.Second + ) + + backoff := initialBackoff + for { + client, err := grpc.NewClient(target, opts...) + if err == nil { + w.setServiceClient(coreId, client) + return protobufs.NewDataIPCServiceClient(client), nil + } + + w.logger.Info( + "worker dial failed, retrying", + zap.Uint("core_id", coreId), + zap.String("target", target), + zap.Duration("backoff", backoff), + zap.Error(err), + ) + + select { + case <-time.After(backoff): + case <-ctx.Done(): + return nil, errors.Wrap(ctx.Err(), "get ipc of worker") + } + + if backoff < maxBackoff { + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + } +} + +func (w *WorkerManager) respawnWorker( + coreId uint, + filter []byte, +) error { + const ( + respawnTimeout = 5 * time.Second + initialBackoff = 50 * time.Millisecond + maxRespawnBackoff = 2 * time.Second + ) + + managerCtx := w.currentContext() + if managerCtx == nil { + managerCtx = context.Background() + } + + backoff := initialBackoff + for { + svc, err := w.getIPCOfWorker(coreId) + if err != nil { + w.logger.Error( + "could not get ipc of worker", + zap.Uint("core_id", coreId), + zap.Error(err), + ) + select { + case <-time.After(backoff): + case <-managerCtx.Done(): + return errors.Wrap(managerCtx.Err(), "respawn worker") + } + continue + } + + ctx, cancel := context.WithTimeout(managerCtx, respawnTimeout) + _, err = svc.Respawn(ctx, &protobufs.RespawnRequest{Filter: filter}) + cancel() + if err == nil { + return nil + } + + w.logger.Warn( + 
"worker respawn failed, retrying", + zap.Uint("core_id", coreId), + zap.Duration("backoff", backoff), + zap.Error(err), + ) + w.closeServiceClient(coreId) + + select { + case <-time.After(backoff): + case <-managerCtx.Done(): + return errors.Wrap(managerCtx.Err(), "respawn worker") + } + + if backoff < maxRespawnBackoff { + backoff *= 2 + if backoff > maxRespawnBackoff { + backoff = maxRespawnBackoff + } + } + } } func (w *WorkerManager) spawnDataWorkers() { diff --git a/protobufs/dispatch.go b/protobufs/dispatch.go index 966ae88..03624a6 100644 --- a/protobufs/dispatch.go +++ b/protobufs/dispatch.go @@ -80,6 +80,12 @@ func (m *InboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &addressLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if addressLen > 64 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } m.Address = make([]byte, addressLen) if _, err := buf.Read(m.Address); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -95,6 +101,12 @@ func (m *InboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &ephemeralKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if ephemeralKeyLen > 57 { + return errors.Wrap( + errors.New("invalid ephemeral key length"), + "from canonical bytes", + ) + } m.EphemeralPublicKey = make([]byte, ephemeralKeyLen) if _, err := buf.Read(m.EphemeralPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -105,6 +117,12 @@ func (m *InboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &messageLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if messageLen > 5*1024*1024 { + return errors.Wrap( + errors.New("invalid message length"), + "from canonical bytes", + ) + } m.Message = make([]byte, messageLen) if _, err := buf.Read(m.Message); err != nil { return 
errors.Wrap(err, "from canonical bytes") @@ -224,6 +242,12 @@ func (m *HubAddInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &addressLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if addressLen > 64 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } m.Address = make([]byte, addressLen) if _, err := buf.Read(m.Address); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -234,6 +258,12 @@ func (m *HubAddInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &inboxKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if inboxKeyLen > 57 { + return errors.Wrap( + errors.New("invalid inbox key length"), + "from canonical bytes", + ) + } m.InboxPublicKey = make([]byte, inboxKeyLen) if _, err := buf.Read(m.InboxPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -244,6 +274,12 @@ func (m *HubAddInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &hubKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if hubKeyLen > 57 { + return errors.Wrap( + errors.New("invalid hub key length"), + "from canonical bytes", + ) + } m.HubPublicKey = make([]byte, hubKeyLen) if _, err := buf.Read(m.HubPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -254,6 +290,12 @@ func (m *HubAddInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &inboxSignatureLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if inboxSignatureLen > 114 { + return errors.Wrap( + errors.New("invalid inbox signature length"), + "from canonical bytes", + ) + } m.InboxSignature = make([]byte, inboxSignatureLen) if _, err := buf.Read(m.InboxSignature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -264,6 +306,12 @@ func (m 
*HubAddInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &hubSignatureLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if hubSignatureLen > 114 { + return errors.Wrap( + errors.New("invalid hub signature length"), + "from canonical bytes", + ) + } m.HubSignature = make([]byte, hubSignatureLen) if _, err := buf.Read(m.HubSignature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -386,6 +434,12 @@ func (m *HubDeleteInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &addressLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if addressLen > 64 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } m.Address = make([]byte, addressLen) if _, err := buf.Read(m.Address); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -396,6 +450,12 @@ func (m *HubDeleteInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &inboxKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if inboxKeyLen > 57 { + return errors.Wrap( + errors.New("invalid inbox key length"), + "from canonical bytes", + ) + } m.InboxPublicKey = make([]byte, inboxKeyLen) if _, err := buf.Read(m.InboxPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -406,6 +466,12 @@ func (m *HubDeleteInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &hubKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if hubKeyLen > 57 { + return errors.Wrap( + errors.New("invalid hub key length"), + "from canonical bytes", + ) + } m.HubPublicKey = make([]byte, hubKeyLen) if _, err := buf.Read(m.HubPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -416,6 +482,12 @@ func (m *HubDeleteInboxMessage) FromCanonicalBytes(data []byte) error { if err := 
binary.Read(buf, binary.BigEndian, &inboxSignatureLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if inboxSignatureLen > 114 { + return errors.Wrap( + errors.New("invalid inbox signature length"), + "from canonical bytes", + ) + } m.InboxSignature = make([]byte, inboxSignatureLen) if _, err := buf.Read(m.InboxSignature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -426,6 +498,12 @@ func (m *HubDeleteInboxMessage) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &hubSignatureLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if hubSignatureLen > 114 { + return errors.Wrap( + errors.New("invalid hub signature length"), + "from canonical bytes", + ) + } m.HubSignature = make([]byte, hubSignatureLen) if _, err := buf.Read(m.HubSignature); err != nil { return errors.Wrap(err, "from canonical bytes") diff --git a/protobufs/dispatch_test.go b/protobufs/dispatch_test.go index d94bcb8..f28fdcc 100644 --- a/protobufs/dispatch_test.go +++ b/protobufs/dispatch_test.go @@ -37,7 +37,7 @@ func TestInboxMessage_Serialization(t *testing.T) { msg: &InboxMessage{ Address: []byte{0xAA, 0xBB, 0xCC}, Timestamp: uint64(time.Now().UnixMilli()), - EphemeralPublicKey: randomBytesDispatch(t, 64), + EphemeralPublicKey: randomBytesDispatch(t, 57), Message: randomBytesDispatch(t, 1024), }, }, diff --git a/protobufs/global.go b/protobufs/global.go index 2610235..d7df3a5 100644 --- a/protobufs/global.go +++ b/protobufs/global.go @@ -312,6 +312,12 @@ func (s *AppShardProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &stateLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if stateLen > 7500000 { + return errors.Wrap( + errors.New("invalid state length"), + "from canonical bytes", + ) + } stateBytes := make([]byte, stateLen) if _, err := buf.Read(stateBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -327,6 +333,12 
@@ func (s *AppShardProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &parentQCLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if parentQCLen > 33871 { + return errors.Wrap( + errors.New("invalid quorum certificate length"), + "from canonical bytes", + ) + } parentQCBytes := make([]byte, parentQCLen) if _, err := buf.Read(parentQCBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -344,7 +356,12 @@ func (s *AppShardProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &priorRankTCLen); err != nil { return errors.Wrap(err, "from canonical bytes") } - + if priorRankTCLen > 35194 { + return errors.Wrap( + errors.New("invalid prior rank timeout certificate length"), + "from canonical bytes", + ) + } if priorRankTCLen != 0 { priorRankTCBytes := make([]byte, priorRankTCLen) if _, err := buf.Read(priorRankTCBytes); err != nil { @@ -364,6 +381,12 @@ func (s *AppShardProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &voteLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if voteLen > 770 { + return errors.Wrap( + errors.New("invalid vote length"), + "from canonical bytes", + ) + } voteBytes := make([]byte, voteLen) if _, err := buf.Read(voteBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -504,6 +527,12 @@ func (s *GlobalProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &stateLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if stateLen > 7800000 { + return errors.Wrap( + errors.New("invalid state length"), + "from canonical bytes", + ) + } stateBytes := make([]byte, stateLen) if _, err := buf.Read(stateBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -519,6 +548,12 @@ func (s *GlobalProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, 
binary.BigEndian, &parentQCLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if parentQCLen > 847 { + return errors.Wrap( + errors.New("invalid parent quorum certificate length"), + "from canonical bytes", + ) + } parentQCBytes := make([]byte, parentQCLen) if _, err := buf.Read(parentQCBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -536,7 +571,12 @@ func (s *GlobalProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &priorRankTCLen); err != nil { return errors.Wrap(err, "from canonical bytes") } - + if priorRankTCLen > 2170 { + return errors.Wrap( + errors.New("invalid prior rank timeout certificate length"), + "from canonical bytes", + ) + } if priorRankTCLen != 0 { priorRankTCBytes := make([]byte, priorRankTCLen) if _, err := buf.Read(priorRankTCBytes); err != nil { @@ -556,6 +596,12 @@ func (s *GlobalProposal) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &voteLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if voteLen > 770 { + return errors.Wrap( + errors.New("invalid vote length"), + "from canonical bytes", + ) + } voteBytes := make([]byte, voteLen) if _, err := buf.Read(voteBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -631,11 +677,17 @@ func (s *SeniorityMerge) FromCanonicalBytes(data []byte) error { } // Read signature - var commitmentLen uint32 - if err := binary.Read(buf, binary.BigEndian, &commitmentLen); err != nil { + var signatureLen uint32 + if err := binary.Read(buf, binary.BigEndian, &signatureLen); err != nil { return errors.Wrap(err, "from canonical bytes") } - s.Signature = make([]byte, commitmentLen) + if signatureLen > 114 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } + s.Signature = make([]byte, signatureLen) if _, err := buf.Read(s.Signature); err != nil { return errors.Wrap(err, "from canonical bytes") } @@ -650,6 
+702,12 @@ func (s *SeniorityMerge) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 585 { + return errors.Wrap( + errors.New("invalid key length"), + "from canonical bytes", + ) + } s.ProverPublicKey = make([]byte, keyLen) if _, err := buf.Read(s.ProverPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -718,12 +776,24 @@ func (l *LegacyProverRequest) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigCount > 100 { + return errors.Wrap( + errors.New("invalid signature count"), + "from canonical bytes", + ) + } l.PublicKeySignaturesEd448 = make([]*Ed448Signature, sigCount) for i := uint32(0); i < sigCount; i++ { var sigLen uint32 if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 187 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -871,6 +941,14 @@ func (p *ProverJoin) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filtersCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + + if filtersCount > 100 { + return errors.Wrap( + errors.New("invalid filter count"), + "from canonical bytes", + ) + } + p.Filters = make([][]byte, filtersCount) // Read each filter for i := uint32(0); i < filtersCount; i++ { @@ -878,6 +956,12 @@ func (p *ProverJoin) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from 
canonical bytes", + ) + } p.Filters[i] = make([]byte, filterLen) if _, err := buf.Read(p.Filters[i]); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -894,6 +978,12 @@ func (p *ProverJoin) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 753 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -912,6 +1002,12 @@ func (p *ProverJoin) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &delegateAddressLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if delegateAddressLen > 32 { + return errors.Wrap( + errors.New("invalid delegate address length"), + "from canonical bytes", + ) + } p.DelegateAddress = make([]byte, delegateAddressLen) if _, err := buf.Read(p.DelegateAddress); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -929,6 +1025,12 @@ func (p *ProverJoin) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &targetLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if targetLen > 675 { + return errors.Wrap( + errors.New("invalid merge target length"), + "from canonical bytes", + ) + } targetBytes := make([]byte, targetLen) if _, err := buf.Read(targetBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -946,6 +1048,12 @@ func (p *ProverJoin) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &proofLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proofLen > 51600 { + return errors.Wrap( + errors.New("invalid proof length"), + "from canonical bytes", + ) + } p.Proof = make([]byte, proofLen) if _, err := buf.Read(p.Proof); err != nil { return errors.Wrap(err, "from canonical bytes") @@ 
-1034,6 +1142,12 @@ func (p *ProverLeave) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filtersCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filtersCount > 100 { + return errors.Wrap( + errors.New("invalid filters count"), + "from canonical byte", + ) + } p.Filters = make([][]byte, filtersCount) // Read each filter for i := uint32(0); i < filtersCount; i++ { @@ -1041,6 +1155,12 @@ func (p *ProverLeave) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } p.Filters[i] = make([]byte, filterLen) if _, err := buf.Read(p.Filters[i]); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1057,6 +1177,12 @@ func (p *ProverLeave) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 118 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -1143,6 +1269,12 @@ func (p *ProverPause) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } p.Filter = make([]byte, filterLen) if _, err := buf.Read(p.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1158,6 +1290,12 @@ func (p *ProverPause) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 118 { + return 
errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -1244,6 +1382,12 @@ func (p *ProverResume) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } p.Filter = make([]byte, filterLen) if _, err := buf.Read(p.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1259,6 +1403,12 @@ func (p *ProverResume) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 118 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -1345,6 +1495,12 @@ func (p *ProverConfirm) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } p.Filter = make([]byte, filterLen) if _, err := buf.Read(p.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1360,6 +1516,12 @@ func (p *ProverConfirm) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 118 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -1446,6 +1608,12 @@ func (p *ProverReject) FromCanonicalBytes(data 
[]byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } p.Filter = make([]byte, filterLen) if _, err := buf.Read(p.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1461,6 +1629,12 @@ func (p *ProverReject) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 118 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -1542,6 +1716,12 @@ func (p *ProverUpdate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &addressLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if addressLen > 32 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } p.DelegateAddress = make([]byte, addressLen) if _, err := buf.Read(p.DelegateAddress); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1552,6 +1732,12 @@ func (p *ProverUpdate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 118 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -2170,6 +2356,12 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &outputLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if outputLen > 516 { + return errors.Wrap( + errors.New("invalid output length"), + "from 
canonical bytes", + ) + } g.Output = make([]byte, outputLen) if _, err := buf.Read(g.Output); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2180,6 +2372,12 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &parentSelectorLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if parentSelectorLen > 32 { + return errors.Wrap( + errors.New("invalid parent selector length"), + "from canonical bytes", + ) + } g.ParentSelector = make([]byte, parentSelectorLen) if _, err := buf.Read(g.ParentSelector); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2190,12 +2388,24 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &commitmentsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if commitmentsCount > 256 { + return errors.Wrap( + errors.New("invalid commitments count"), + "from canonical bytes", + ) + } g.GlobalCommitments = make([][]byte, commitmentsCount) for i := uint32(0); i < commitmentsCount; i++ { var commitmentLen uint32 if err := binary.Read(buf, binary.BigEndian, &commitmentLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if commitmentLen > 74 { + return errors.Wrap( + errors.New("invalid commitment length"), + "from canonical bytes", + ) + } g.GlobalCommitments[i] = make([]byte, commitmentLen) if _, err := buf.Read(g.GlobalCommitments[i]); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2211,6 +2421,12 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proverTreeCommitmentLen > 74 { + return errors.Wrap( + errors.New("invalid prover tree commitment length"), + "from canonical bytes", + ) + } g.ProverTreeCommitment = make([]byte, proverTreeCommitmentLen) if _, err := buf.Read(g.ProverTreeCommitment); err != nil { return 
errors.Wrap(err, "from canonical bytes") @@ -2225,6 +2441,12 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if requestsRootLen > 74 { + return errors.Wrap( + errors.New("invalid requests root length"), + "from canonical bytes", + ) + } g.RequestsRoot = make([]byte, requestsRootLen) if _, err := buf.Read(g.RequestsRoot); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2239,6 +2461,12 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proverLen > 32 { + return errors.Wrap( + errors.New("invalid prover length"), + "from canonical bytes", + ) + } g.Prover = make([]byte, proverLen) if _, err := buf.Read(g.Prover); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2249,6 +2477,12 @@ func (g *GlobalFrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 711 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -2423,6 +2657,12 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &addressLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if addressLen > 64 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } f.Address = make([]byte, addressLen) if _, err := buf.Read(f.Address); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2448,6 +2688,12 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &outputLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if outputLen != 516 { + return 
errors.Wrap( + errors.New("invalid output length"), + "from canonical bytes", + ) + } f.Output = make([]byte, outputLen) if _, err := buf.Read(f.Output); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2458,6 +2704,12 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &parentSelectorLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if parentSelectorLen > 32 { + return errors.Wrap( + errors.New("invalid selector length"), + "from canonical bytes", + ) + } f.ParentSelector = make([]byte, parentSelectorLen) if _, err := buf.Read(f.ParentSelector); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2468,6 +2720,12 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &requestsRootLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if requestsRootLen > 74 { + return errors.Wrap( + errors.New("invalid requests root length"), + "from canonical bytes", + ) + } f.RequestsRoot = make([]byte, requestsRootLen) if _, err := buf.Read(f.RequestsRoot); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2478,12 +2736,24 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &stateRootsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if stateRootsCount != 4 { + return errors.Wrap( + errors.New("invalid state roots length"), + "from canonical bytes", + ) + } f.StateRoots = make([][]byte, stateRootsCount) for i := uint32(0); i < stateRootsCount; i++ { var rootLen uint32 if err := binary.Read(buf, binary.BigEndian, &rootLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if rootLen > 74 { + return errors.Wrap( + errors.New("invalid state root length"), + "from canonical bytes", + ) + } f.StateRoots[i] = make([]byte, rootLen) if _, err := buf.Read(f.StateRoots[i]); err != nil { return 
errors.Wrap(err, "from canonical bytes") @@ -2495,6 +2765,12 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &proverLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proverLen > 32 { + return errors.Wrap( + errors.New("invalid prover length"), + "from canonical bytes", + ) + } f.Prover = make([]byte, proverLen) if _, err := buf.Read(f.Prover); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2514,6 +2790,12 @@ func (f *FrameHeader) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 33735 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -2621,6 +2903,12 @@ func (p *ProverLivenessCheck) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } p.Filter = make([]byte, filterLen) if _, err := buf.Read(p.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2641,6 +2929,12 @@ func (p *ProverLivenessCheck) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &commitmentHashLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if commitmentHashLen > 20000 { + return errors.Wrap( + errors.New("invalid commitment hash length"), + "from canonical bytes", + ) + } p.CommitmentHash = make([]byte, commitmentHashLen) if _, err := buf.Read(p.CommitmentHash); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2651,7 +2945,12 @@ func (p *ProverLivenessCheck) FromCanonicalBytes(data []byte) error { if err :=
binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } - + if sigLen > 118 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen == 0 { return errors.Wrap(errors.New("invalid signature"), "from canonical bytes") } @@ -2762,6 +3061,12 @@ func (f *ProposalVote) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } f.Filter = make([]byte, filterLen) if _, err := buf.Read(f.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2782,6 +3087,12 @@ func (f *ProposalVote) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &selectorLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if selectorLen > 32 { + return errors.Wrap( + errors.New("invalid selector length"), + "from canonical bytes", + ) + } f.Selector = make([]byte, selectorLen) if _, err := buf.Read(f.Selector); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2797,6 +3108,12 @@ func (f *ProposalVote) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 634 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -2918,6 +3235,12 @@ func (f *TimeoutState) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if latestQuorumCertLen > 33871 { + return errors.Wrap( + errors.New("invalid latest quorum certificate length"), + "from canonical bytes", + ) + } if latestQuorumCertLen > 0 { 
latestQuorumCertBytes := make([]byte, latestQuorumCertLen) if _, err := buf.Read(latestQuorumCertBytes); err != nil { @@ -2940,6 +3263,12 @@ func (f *TimeoutState) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if priorRankTimeoutCertLen > 35194 { + return errors.Wrap( + errors.New("invalid prior rank timeout certificate length"), + "from canonical bytes", + ) + } if priorRankTimeoutCertLen > 0 { priorRankTimeoutBytes := make([]byte, priorRankTimeoutCertLen) if _, err := buf.Read(priorRankTimeoutBytes); err != nil { @@ -2958,6 +3287,12 @@ func (f *TimeoutState) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &voteLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if voteLen > 770 { + return errors.Wrap( + errors.New("invalid vote length"), + "from canonical bytes", + ) + } if voteLen > 0 { voteBytes := make([]byte, voteLen) if _, err := buf.Read(voteBytes); err != nil { @@ -3077,6 +3412,12 @@ func (f *QuorumCertificate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } f.Filter = make([]byte, filterLen) if _, err := buf.Read(f.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3097,6 +3438,12 @@ func (f *QuorumCertificate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &selectorLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if selectorLen > 32 { + return errors.Wrap( + errors.New("invalid selector length"), + "from canonical bytes", + ) + } f.Selector = make([]byte, selectorLen) if _, err := buf.Read(f.Selector); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3112,6 +3459,12 @@ func (f *QuorumCertificate) 
FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 33735 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -3241,6 +3594,12 @@ func (t *TimeoutCertificate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if filterLen > 64 { + return errors.Wrap( + errors.New("invalid filter length"), + "from canonical bytes", + ) + } t.Filter = make([]byte, filterLen) if _, err := buf.Read(t.Filter); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3256,6 +3615,12 @@ func (t *TimeoutCertificate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &latestRanksCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if latestRanksCount > 64 { + return errors.Wrap( + errors.New("invalid latest ranks count"), + "from canonical bytes", + ) + } t.LatestRanks = make([]uint64, latestRanksCount) if err := binary.Read(buf, binary.BigEndian, &t.LatestRanks); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3270,6 +3635,12 @@ func (t *TimeoutCertificate) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if latestQuorumCertLen > 33871 { + return errors.Wrap( + errors.New("invalid latest quorum certificate length"), + "from canonical bytes", + ) + } if latestQuorumCertLen > 0 { latestQuorumCertBytes := make([]byte, latestQuorumCertLen) if _, err := buf.Read(latestQuorumCertBytes); err != nil { @@ -3293,6 +3664,12 @@ func (t *TimeoutCertificate) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from 
canonical bytes") } + if sigLen > 711 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } if sigLen > 0 { sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { @@ -3385,6 +3762,12 @@ func (g *GlobalFrame) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &headerLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if headerLen > 21467 { + return errors.Wrap( + errors.New("invalid header length"), + "from canonical bytes", + ) + } if headerLen > 0 { headerBytes := make([]byte, headerLen) if _, err := buf.Read(headerBytes); err != nil { @@ -3401,12 +3784,24 @@ func (g *GlobalFrame) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &requestsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if requestsCount > 100 { + return errors.Wrap( + errors.New("invalid requests count"), + "from canonical bytes", + ) + } g.Requests = make([]*MessageBundle, requestsCount) for i := uint32(0); i < requestsCount; i++ { var requestLen uint32 if err := binary.Read(buf, binary.BigEndian, &requestLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if requestLen > 75000 { + return errors.Wrap( + errors.New("invalid request length"), + "from canonical bytes", + ) + } requestBytes := make([]byte, requestLen) if _, err := buf.Read(requestBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3498,6 +3893,12 @@ func (a *AppShardFrame) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &headerLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if headerLen > 34829 { + return errors.Wrap( + errors.New("invalid header length"), + "from canonical bytes", + ) + } if headerLen > 0 { headerBytes := make([]byte, headerLen) if _, err := buf.Read(headerBytes); err != nil { @@ -3514,12 +3915,24 @@ func (a *AppShardFrame) 
FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &requestsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if requestsCount > 100 { + return errors.Wrap( + errors.New("invalid requests length"), + "from canonical bytes", + ) + } a.Requests = make([]*MessageBundle, requestsCount) for i := uint32(0); i < requestsCount; i++ { var requestLen uint32 if err := binary.Read(buf, binary.BigEndian, &requestLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if requestLen > 74000 { + return errors.Wrap( + errors.New("invalid request size"), + "from canonical bytes", + ) + } requestBytes := make([]byte, requestLen) if _, err := buf.Read(requestBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3589,6 +4002,12 @@ func (m *Multiproof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &commitLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if commitLen != 74 { + return errors.Wrap( + errors.New("invalid multicommitment length"), + "from canonical bytes", + ) + } m.Multicommitment = make([]byte, commitLen) if _, err := buf.Read(m.Multicommitment); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3599,6 +4018,12 @@ func (m *Multiproof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &proofLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proofLen != 74 { + return errors.Wrap( + errors.New("invalid proof length"), + "from canonical bytes", + ) + } m.Proof = make([]byte, proofLen) if _, err := buf.Read(m.Proof); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3654,6 +4079,12 @@ func (p *Path) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &indicesCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if indicesCount > 64 { + return errors.Wrap( + errors.New("invalid indices 
count"), + "from canonical bytes", + ) + } p.Indices = make([]uint64, indicesCount) // Read each index for i := uint32(0); i < indicesCount; i++ { @@ -3767,6 +4198,12 @@ func (t *TraversalSubProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &commitsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if commitsCount > 64 { + return errors.Wrap( + errors.New("invalid commits length"), + "from canonical bytes", + ) + } t.Commits = make([][]byte, commitsCount) // Read each commit for i := uint32(0); i < commitsCount; i++ { @@ -3774,6 +4211,12 @@ func (t *TraversalSubProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &commitLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if commitLen > 74 { + return errors.Wrap( + errors.New("invalid commitment length"), + "from canonical bytes", + ) + } t.Commits[i] = make([]byte, commitLen) if _, err := buf.Read(t.Commits[i]); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3785,6 +4228,12 @@ func (t *TraversalSubProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &ysCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if ysCount > 64 { + return errors.Wrap( + errors.New("invalid ys count"), + "from canonical bytes", + ) + } t.Ys = make([][]byte, ysCount) // Read each y for i := uint32(0); i < ysCount; i++ { @@ -3792,6 +4241,14 @@ func (t *TraversalSubProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &yLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + // Not a normal length, but we're accounting for unusual scenarios, the + // parent caller will be more limiting + if yLen > 2000 { + return errors.Wrap( + errors.New("invalid y length"), + "from canonical bytes", + ) + } t.Ys[i] = make([]byte, yLen) if _, err := buf.Read(t.Ys[i]); err != nil { return
errors.Wrap(err, "from canonical bytes") @@ -3803,6 +4260,12 @@ func (t *TraversalSubProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &pathsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if pathsCount > 64 { + return errors.Wrap( + errors.New("invalid paths count"), + "from canonical bytes", + ) + } t.Paths = make([]*Path, pathsCount) // Read each path for i := uint32(0); i < pathsCount; i++ { @@ -3810,6 +4273,12 @@ func (t *TraversalSubProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &pathLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if pathLen > 4104 { + return errors.Wrap( + errors.New("invalid path length"), + "from canonical bytes", + ) + } pathBytes := make([]byte, pathLen) if _, err := buf.Read(pathBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3903,6 +4372,12 @@ func (t *TraversalProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &multiproofLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if multiproofLen > 160 { + return errors.Wrap( + errors.New("invalid multiproof length"), + "from canonical bytes", + ) + } if multiproofLen > 0 { multiproofBytes := make([]byte, multiproofLen) if _, err := buf.Read(multiproofBytes); err != nil { @@ -3919,6 +4394,12 @@ func (t *TraversalProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &subProofsCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if subProofsCount > 100 { + return errors.Wrap( + errors.New("invalid subproofs count"), + "from canonical bytes", + ) + } t.SubProofs = make([]*TraversalSubProof, subProofsCount) // Read each sub proof for i := uint32(0); i < subProofsCount; i++ { @@ -3926,6 +4407,12 @@ func (t *TraversalProof) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &subProofLen); err 
!= nil { return errors.Wrap(err, "from canonical bytes") } + if subProofLen > 43000 { + return errors.Wrap( + errors.New("invalid subproof length"), + "from canonical bytes", + ) + } subProofBytes := make([]byte, subProofLen) if _, err := buf.Read(subProofBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4063,6 +4550,12 @@ func (p *ProverKick) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen != 585 { + return errors.Wrap( + errors.New("invalid key length"), + "from canonical bytes", + ) + } p.KickedProverPublicKey = make([]byte, keyLen) if _, err := buf.Read(p.KickedProverPublicKey); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4073,6 +4566,12 @@ func (p *ProverKick) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &frame1Len); err != nil { return errors.Wrap(err, "from canonical bytes") } + if frame1Len > 34825 { + return errors.Wrap( + errors.New("invalid frame1 length"), + "from canonical bytes", + ) + } p.ConflictingFrame_1 = make([]byte, frame1Len) if _, err := buf.Read(p.ConflictingFrame_1); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4083,6 +4582,12 @@ func (p *ProverKick) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &frame2Len); err != nil { return errors.Wrap(err, "from canonical bytes") } + if frame2Len > 34825 { + return errors.Wrap( + errors.New("invalid frame2 length"), + "from canonical bytes", + ) + } p.ConflictingFrame_2 = make([]byte, frame2Len) if _, err := buf.Read(p.ConflictingFrame_2); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4093,6 +4598,12 @@ func (p *ProverKick) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &commitmentLen); err != nil { return
errors.Wrap( + errors.New("invalid commitment length"), + "from canonical bytes", + ) + } p.Commitment = make([]byte, commitmentLen) if _, err := buf.Read(p.Commitment); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4103,6 +4614,12 @@ func (p *ProverKick) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &proofLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proofLen > 160 { + return errors.Wrap( + errors.New("invalid proof length"), + "from canonical bytes", + ) + } p.Proof = make([]byte, proofLen) if _, err := buf.Read(p.Proof); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4113,6 +4630,12 @@ func (p *ProverKick) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &traversalLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if traversalLen > 4000 { + return errors.Wrap( + errors.New("invalid traversal proof length"), + "from canonical bytes", + ) + } if traversalLen > 0 { traversalBytes := make([]byte, traversalLen) if _, err := buf.Read(traversalBytes); err != nil { @@ -4183,6 +4706,12 @@ func (g *GlobalAlert) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &msgLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if msgLen > 1000 { + return errors.Wrap( + errors.New("invalid message length"), + "from canonical bytes", + ) + } msgBytes := make([]byte, msgLen) if _, err := buf.Read(msgBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -4194,6 +4723,12 @@ func (g *GlobalAlert) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 114 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } g.Signature = make([]byte, sigLen) if _, err := buf.Read(g.Signature); err != nil { return 
errors.Wrap(err, "from canonical bytes") @@ -4335,6 +4870,9 @@ func (t *ProverJoin) Validate() error { if len(t.Filters) == 0 { return errors.Wrap(errors.New("no filters provided"), "validate") } + if len(t.Filters) > 100 { + return errors.Wrap(errors.New("too many filters provided"), "validate") + } for _, filter := range t.Filters { if len(filter) < 32 || len(filter) > 64 { return errors.Wrap(errors.New("invalid filter"), "validate") diff --git a/protobufs/global_test.go b/protobufs/global_test.go index 7d2b6b1..9472747 100644 --- a/protobufs/global_test.go +++ b/protobufs/global_test.go @@ -165,14 +165,19 @@ func TestFrameHeader_Serialization(t *testing.T) { { name: "complete frame header", header: &FrameHeader{ - Address: make([]byte, 32), - FrameNumber: 99999, - Timestamp: 1234567890123, - Difficulty: 1000000, - Output: make([]byte, 516), // VDF output: 258 + 258 bytes - ParentSelector: make([]byte, 32), - RequestsRoot: make([]byte, 32), - StateRoots: [][]byte{make([]byte, 32), make([]byte, 32)}, + Address: make([]byte, 64), + FrameNumber: 99999, + Timestamp: 1234567890123, + Difficulty: 1000000, + Output: make([]byte, 516), // VDF output: 258 + 258 bytes + ParentSelector: make([]byte, 32), + RequestsRoot: make([]byte, 74), + StateRoots: [][]byte{ + make([]byte, 74), + make([]byte, 74), + make([]byte, 74), + make([]byte, 74), + }, Prover: make([]byte, 32), FeeMultiplierVote: 500, PublicKeySignatureBls48581: &BLS48581AggregateSignature{ @@ -184,22 +189,6 @@ func TestFrameHeader_Serialization(t *testing.T) { }, }, }, - { - name: "minimal frame header", - header: &FrameHeader{ - Address: []byte{}, - FrameNumber: 0, - Timestamp: 0, - Difficulty: 0, - Output: []byte{}, - ParentSelector: []byte{}, - RequestsRoot: []byte{}, - StateRoots: [][]byte{}, - Prover: []byte{}, - FeeMultiplierVote: 0, - PublicKeySignatureBls48581: nil, - }, - }, } for _, tt := range tests { @@ -626,15 +615,15 @@ func TestProverKick_Serialization(t *testing.T) { name: "complete prover kick", 
kick: &ProverKick{ FrameNumber: 66666, - KickedProverPublicKey: make([]byte, 57), // Ed448 public key + KickedProverPublicKey: make([]byte, 585), // BLS48-581 public key ConflictingFrame_1: make([]byte, 32), ConflictingFrame_2: make([]byte, 32), Commitment: make([]byte, 32), - Proof: make([]byte, 128), + Proof: make([]byte, 160), TraversalProof: &TraversalProof{ Multiproof: &Multiproof{ - Multicommitment: make([]byte, 32), - Proof: make([]byte, 64), + Multicommitment: make([]byte, 74), + Proof: make([]byte, 74), }, SubProofs: []*TraversalSubProof{ { @@ -646,18 +635,6 @@ func TestProverKick_Serialization(t *testing.T) { }, }, }, - { - name: "minimal prover kick", - kick: &ProverKick{ - FrameNumber: 0, - KickedProverPublicKey: []byte{}, - ConflictingFrame_1: []byte{}, - ConflictingFrame_2: []byte{}, - Commitment: []byte{}, - Proof: []byte{}, - TraversalProof: nil, - }, - }, } for _, tt := range tests { @@ -1023,14 +1000,19 @@ func TestAppShardFrame_Serialization(t *testing.T) { name: "complete app shard frame", frame: &AppShardFrame{ Header: &FrameHeader{ - Address: make([]byte, 32), - FrameNumber: 67890, - Timestamp: 1234567890123, - Difficulty: 500000, - Output: make([]byte, 516), - ParentSelector: make([]byte, 32), - RequestsRoot: make([]byte, 32), - StateRoots: [][]byte{make([]byte, 32), make([]byte, 32)}, + Address: make([]byte, 32), + FrameNumber: 67890, + Timestamp: 1234567890123, + Difficulty: 500000, + Output: make([]byte, 516), + ParentSelector: make([]byte, 32), + RequestsRoot: make([]byte, 32), + StateRoots: [][]byte{ + make([]byte, 74), + make([]byte, 74), + make([]byte, 74), + make([]byte, 74), + }, Prover: make([]byte, 32), FeeMultiplierVote: 250, PublicKeySignatureBls48581: &BLS48581AggregateSignature{ @@ -1071,13 +1053,6 @@ func TestAppShardFrame_Serialization(t *testing.T) { }, }, }, - { - name: "minimal app shard frame", - frame: &AppShardFrame{ - Header: nil, - Requests: []*MessageBundle{}, - }, - }, } for _, tt := range tests { @@ -1216,22 
+1191,15 @@ func TestMultiproof_Serialization(t *testing.T) { { name: "complete multiproof", multiproof: &Multiproof{ - Multicommitment: make([]byte, 32), - Proof: make([]byte, 256), + Multicommitment: make([]byte, 74), + Proof: make([]byte, 74), }, }, { name: "multiproof with different sizes", multiproof: &Multiproof{ - Multicommitment: append([]byte{0xAA}, make([]byte, 31)...), - Proof: append([]byte{0xBB}, make([]byte, 255)...), - }, - }, - { - name: "minimal multiproof", - multiproof: &Multiproof{ - Multicommitment: []byte{}, - Proof: []byte{}, + Multicommitment: append([]byte{0xAA}, make([]byte, 73)...), + Proof: append([]byte{0xBB}, make([]byte, 73)...), }, }, } @@ -1359,33 +1327,23 @@ func TestTraversalProof_Serialization(t *testing.T) { name: "complete traversal proof", proof: &TraversalProof{ Multiproof: &Multiproof{ - Multicommitment: make([]byte, 32), - Proof: make([]byte, 128), + Multicommitment: make([]byte, 74), + Proof: make([]byte, 74), }, SubProofs: []*TraversalSubProof{ { - Commits: [][]byte{make([]byte, 32)}, + Commits: [][]byte{make([]byte, 74)}, Ys: [][]byte{make([]byte, 48)}, Paths: []*Path{{Indices: []uint64{1, 2}}}, }, { - Commits: [][]byte{make([]byte, 32), make([]byte, 32)}, + Commits: [][]byte{make([]byte, 74), make([]byte, 74)}, Ys: [][]byte{make([]byte, 48), make([]byte, 48)}, Paths: []*Path{{Indices: []uint64{3, 4, 5}}}, }, }, }, }, - { - name: "minimal traversal proof", - proof: &TraversalProof{ - Multiproof: &Multiproof{ - Multicommitment: []byte{}, - Proof: []byte{}, - }, - SubProofs: []*TraversalSubProof{}, - }, - }, } for _, tt := range tests { diff --git a/protobufs/keys.go b/protobufs/keys.go index a72a9ff..ff5c73b 100644 --- a/protobufs/keys.go +++ b/protobufs/keys.go @@ -87,7 +87,7 @@ func (s *SignedX448Key) Validate() error { } // Parent key address should be non-zero bytes - if len(s.ParentKeyAddress) == 0 { + if len(s.ParentKeyAddress) == 0 || len(s.ParentKeyAddress) > 64 { return errors.Wrap( errors.New("invalid parent key 
address length"), "validate", @@ -173,7 +173,7 @@ func (s *SignedDecaf448Key) Validate() error { } // Parent key address should be non-zero bytes - if len(s.ParentKeyAddress) == 0 { + if len(s.ParentKeyAddress) == 0 || len(s.ParentKeyAddress) > 64 { return errors.Wrap( errors.New("invalid parent key address length"), "validate", @@ -245,8 +245,12 @@ func (k *KeyCollection) Validate() error { } // KeyPurpose should not be empty - if k.KeyPurpose == "" { - return errors.Wrap(errors.New("empty key purpose"), "validate") + if k.KeyPurpose == "" || len(k.KeyPurpose) > 32 { + return errors.Wrap(errors.New("invalid key purpose length"), "validate") + } + + if len(k.X448Keys) > 20 { + return errors.Wrap(errors.New("invalid key collection length"), "validate") } // Validate all x448 keys @@ -256,6 +260,10 @@ func (k *KeyCollection) Validate() error { } } + if len(k.Decaf448Keys) > 20 { + return errors.Wrap(errors.New("invalid key collection length"), "validate") + } + // Validate all decaf448 keys for i, key := range k.Decaf448Keys { if err := key.Validate(); err != nil { @@ -274,6 +282,10 @@ func (k *KeyRegistry) Validate() error { ) } + if len(k.KeysByPurpose) > 20 { + return errors.Wrap(errors.New("invalid purpose set length"), "validate") + } + // Validate keys by purpose map for purpose, collection := range k.KeysByPurpose { if err := collection.Validate(); err != nil { @@ -389,8 +401,11 @@ func (s *SignedX448Key) Verify( return errors.Wrap(errors.New("invalid length for key"), "verify") } - if len(s.ParentKeyAddress) == 0 { - return errors.Wrap(errors.New("parent key address required"), "verify") + if len(s.ParentKeyAddress) == 0 || len(s.ParentKeyAddress) > 64 { + return errors.Wrap( + errors.New("invalid parent key address length"), + "verify", + ) } // Verify signature and check that parent key address matches @@ -515,8 +530,11 @@ func (s *SignedDecaf448Key) Verify( return errors.Wrap(errors.New("invalid length for key"), "verify") } - if len(s.ParentKeyAddress) == 
0 { - return errors.Wrap(errors.New("parent key address required"), "verify") + if len(s.ParentKeyAddress) == 0 || len(s.ParentKeyAddress) > 64 { + return errors.Wrap( + errors.New("invalid parent key address length"), + "verify", + ) } // Verify signature and check that parent key address matches @@ -746,6 +764,9 @@ func (e *Ed448Signature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 61 { + return errors.Wrap(errors.New("invalid key length"), "from canonical bytes") + } if keyLen > 0 { keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -762,6 +783,12 @@ func (e *Ed448Signature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 114 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } e.Signature = make([]byte, sigLen) if _, err := buf.Read(e.Signature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -893,6 +920,9 @@ func (b *BLS48581Signature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 589 { + return errors.Wrap(errors.New("invalid key length"), "from canonical bytes") + } if keyLen > 0 { keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -909,6 +939,12 @@ func (b *BLS48581Signature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 74 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } b.Signature = make([]byte, sigLen) if _, err := buf.Read(b.Signature); err != nil { return errors.Wrap(err, "from 
canonical bytes") @@ -1007,6 +1043,12 @@ func (b *BLS48581SignatureWithProofOfPossession) FromCanonicalBytes( if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen != 74 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } b.Signature = make([]byte, sigLen) if _, err := buf.Read(b.Signature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1018,6 +1060,12 @@ func (b *BLS48581SignatureWithProofOfPossession) FromCanonicalBytes( return errors.Wrap(err, "from canonical bytes") } if pubKeyLen > 0 { + if pubKeyLen != 589 { + return errors.Wrap( + errors.New("invalid pubkey length"), + "from canonical bytes", + ) + } pubKeyBytes := make([]byte, pubKeyLen) if _, err := buf.Read(pubKeyBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1033,6 +1081,12 @@ func (b *BLS48581SignatureWithProofOfPossession) FromCanonicalBytes( if err := binary.Read(buf, binary.BigEndian, &popSigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if popSigLen != 74 { + return errors.Wrap( + errors.New("invalid pop length"), + "from canonical bytes", + ) + } b.PopSignature = make([]byte, popSigLen) if _, err := buf.Read(b.PopSignature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1103,6 +1157,12 @@ func (b *BLS48581AddressedSignature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen != 74 && sigLen != (74+516) { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } b.Signature = make([]byte, sigLen) if _, err := buf.Read(b.Signature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1113,6 +1173,12 @@ func (b *BLS48581AddressedSignature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, 
&addrLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if addrLen != 32 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } b.Address = make([]byte, addrLen) if _, err := buf.Read(b.Address); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1205,6 +1271,12 @@ func (b *BLS48581AggregateSignature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen != 74 && (sigLen > 74+(516*64) || ((sigLen-74)%516) != 0) { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } b.Signature = make([]byte, sigLen) if _, err := buf.Read(b.Signature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1215,6 +1287,12 @@ func (b *BLS48581AggregateSignature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &pubKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if pubKeyLen != 0 && pubKeyLen != 589 { + return errors.Wrap( + errors.New("invalid pubkey length"), + "from canonical bytes", + ) + } if pubKeyLen > 0 { pubKeyBytes := make([]byte, pubKeyLen) if _, err := buf.Read(pubKeyBytes); err != nil { @@ -1231,6 +1309,12 @@ func (b *BLS48581AggregateSignature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &bitmaskLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if bitmaskLen > 32 { + return errors.Wrap( + errors.New("invalid bitmask length"), + "from canonical bytes", + ) + } b.Bitmask = make([]byte, bitmaskLen) if _, err := buf.Read(b.Bitmask); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1414,6 +1498,12 @@ func (d *Decaf448Signature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen 
!= 60 && keyLen != 0 { + return errors.Wrap( + errors.New("invalid pubkey length"), + "from canonical bytes", + ) + } if keyLen > 0 { keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -1430,6 +1520,12 @@ func (d *Decaf448Signature) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if sigLen > 336 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } d.Signature = make([]byte, sigLen) if _, err := buf.Read(d.Signature); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1598,6 +1694,12 @@ func (s *SignedX448Key) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 61 { + return errors.Wrap( + errors.New("invalid pubkey length"), + "from canonical bytes", + ) + } if keyLen > 0 { keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -1618,6 +1720,12 @@ func (s *SignedX448Key) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if parentKeyAddressLen > 64 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } s.ParentKeyAddress = make([]byte, parentKeyAddressLen) if _, err := buf.Read(s.ParentKeyAddress); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1635,6 +1743,13 @@ func (s *SignedX448Key) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + // largest possible signature size + if sigLen > 675 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { return errors.Wrap(err, "from 
canonical bytes") @@ -1679,6 +1794,12 @@ func (s *SignedX448Key) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &purposeLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if purposeLen > 32 { + return errors.Wrap( + errors.New("invalid purpose length"), + "from canonical bytes", + ) + } purposeBytes := make([]byte, purposeLen) if _, err := buf.Read(purposeBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1848,6 +1969,12 @@ func (s *SignedDecaf448Key) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 60 { + return errors.Wrap( + errors.New("invalid pubkey length"), + "from canonical bytes", + ) + } if keyLen > 0 { keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -1868,6 +1995,12 @@ func (s *SignedDecaf448Key) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if parentKeyAddressLen > 64 { + return errors.Wrap( + errors.New("invalid address length"), + "from canonical bytes", + ) + } s.ParentKeyAddress = make([]byte, parentKeyAddressLen) if _, err := buf.Read(s.ParentKeyAddress); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1885,6 +2018,13 @@ func (s *SignedDecaf448Key) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + // longest possible signature length + if sigLen > 675 { + return errors.Wrap( + errors.New("invalid signature length"), + "from canonical bytes", + ) + } sigBytes := make([]byte, sigLen) if _, err := buf.Read(sigBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -1929,6 +2069,12 @@ func (s *SignedDecaf448Key) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &purposeLen); err != 
nil { return errors.Wrap(err, "from canonical bytes") } + if purposeLen > 32 { + return errors.Wrap( + errors.New("invalid purpose length"), + "from canonical bytes", + ) + } purposeBytes := make([]byte, purposeLen) if _, err := buf.Read(purposeBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2113,7 +2259,13 @@ func (b *BLS48581AggregateSignature) Validate() error { } } - // Bitmask can be variable length + // Bitmask can be variable length, but should not exceed 32 + if len(b.Bitmask) > 32 { + return errors.Wrap( + errors.New("invalid bitmask length"), + "validate", + ) + } return nil } @@ -2316,6 +2468,12 @@ func (k *KeyCollection) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &purposeLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if purposeLen > 32 { + return errors.Wrap( + errors.New("invalid purpose length"), + "from canonical bytes", + ) + } purposeBytes := make([]byte, purposeLen) if _, err := buf.Read(purposeBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2327,7 +2485,12 @@ func (k *KeyCollection) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &x448KeysCount); err != nil { return errors.Wrap(err, "from canonical bytes") } - + if x448KeysCount > 20 { + return errors.Wrap( + errors.New("invalid x448 keys length"), + "from canonical bytes", + ) + } // Read each key k.X448Keys = make([]*SignedX448Key, x448KeysCount) for i := uint32(0); i < x448KeysCount; i++ { @@ -2335,6 +2498,12 @@ func (k *KeyCollection) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 869 { + return errors.Wrap( + errors.New("invalid key length"), + "from canonical bytes", + ) + } keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2350,7 
+2519,12 @@ func (k *KeyCollection) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &decaf448KeysCount); err != nil { return errors.Wrap(err, "from canonical bytes") } - + if decaf448KeysCount > 20 { + return errors.Wrap( + errors.New("invalid decaf448 keys length"), + "from canonical bytes", + ) + } // Read each key k.Decaf448Keys = make([]*SignedDecaf448Key, decaf448KeysCount) for i := uint32(0); i < decaf448KeysCount; i++ { @@ -2358,6 +2532,12 @@ func (k *KeyCollection) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &keyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if keyLen > 869 { + return errors.Wrap( + errors.New("invalid key length"), + "from canonical bytes", + ) + } keyBytes := make([]byte, keyLen) if _, err := buf.Read(keyBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2542,6 +2722,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &identityKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if identityKeyLen > 61 { + return errors.Wrap( + errors.New("invalid identity key length"), + "from canonical bytes", + ) + } if identityKeyLen > 0 { keyBytes := make([]byte, identityKeyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -2558,6 +2744,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &proverKeyLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proverKeyLen > 589 { + return errors.Wrap( + errors.New("invalid prover key length"), + "from canonical bytes", + ) + } if proverKeyLen > 0 { keyBytes := make([]byte, proverKeyLen) if _, err := buf.Read(keyBytes); err != nil { @@ -2578,6 +2770,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if identityToProverLen > 187 { + return 
errors.Wrap( + errors.New("invalid key length"), + "from canonical bytes", + ) + } if identityToProverLen > 0 { sigBytes := make([]byte, identityToProverLen) if _, err := buf.Read(sigBytes); err != nil { @@ -2598,6 +2796,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { ); err != nil { return errors.Wrap(err, "from canonical bytes") } + if proverToIdentityLen > 675 { + return errors.Wrap( + errors.New("invalid key length"), + "from canonical bytes", + ) + } if proverToIdentityLen > 0 { sigBytes := make([]byte, proverToIdentityLen) if _, err := buf.Read(sigBytes); err != nil { @@ -2614,6 +2818,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &mapCount); err != nil { return errors.Wrap(err, "from canonical bytes") } + if mapCount > 20 { + return errors.Wrap( + errors.New("invalid key map length"), + "from canonical bytes", + ) + } // Read each key collection in the map k.KeysByPurpose = make(map[string]*KeyCollection) @@ -2623,6 +2833,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &purposeLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if purposeLen > 32 { + return errors.Wrap( + errors.New("invalid purpose length"), + "from canonical bytes", + ) + } purposeBytes := make([]byte, purposeLen) if _, err := buf.Read(purposeBytes); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2634,6 +2850,12 @@ func (k *KeyRegistry) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &collectionLen); err != nil { return errors.Wrap(err, "from canonical bytes") } + if collectionLen > 27604 { + return errors.Wrap( + errors.New("invalid collection length"), + "from canonical bytes", + ) + } collectionBytes := make([]byte, collectionLen) if _, err := buf.Read(collectionBytes); err != nil { return errors.Wrap(err, "from canonical bytes") diff --git a/protobufs/keys_test.go 
b/protobufs/keys_test.go index bbbe2c8..a0e7023 100644 --- a/protobufs/keys_test.go +++ b/protobufs/keys_test.go @@ -29,13 +29,6 @@ func TestBLS48581SignatureWithProofOfPossession_Serialization(t *testing.T) { PopSignature: make([]byte, 74), }, }, - { - name: "empty fields", - sig: &BLS48581SignatureWithProofOfPossession{ - Signature: []byte{}, - PopSignature: []byte{}, - }, - }, } for _, tt := range tests { @@ -76,17 +69,6 @@ func TestBLS48581AddressedSignature_Serialization(t *testing.T) { Address: make([]byte, 32), }, }, - { - name: "empty fields", - sig: &BLS48581AddressedSignature{ - Signature: []byte{}, - Address: []byte{}, - }, - }, - { - name: "nil fields", - sig: &BLS48581AddressedSignature{}, - }, } for _, tt := range tests { @@ -130,13 +112,6 @@ func TestBLS48581AggregateSignature_Serialization(t *testing.T) { Bitmask: []byte{0x00, 0x00, 0x00, 0x00}, }, }, - { - name: "empty fields", - sig: &BLS48581AggregateSignature{ - Signature: []byte{}, - Bitmask: []byte{}, - }, - }, } for _, tt := range tests { @@ -384,12 +359,6 @@ func TestDecaf448Signature_Serialization(t *testing.T) { Signature: make([]byte, 112), }, }, - { - name: "empty fields", - sig: &Decaf448Signature{ - Signature: []byte{}, - }, - }, } for _, tt := range tests { diff --git a/protobufs/node.pb.go b/protobufs/node.pb.go index 23cd246..c7676ce 100644 --- a/protobufs/node.pb.go +++ b/protobufs/node.pb.go @@ -371,6 +371,7 @@ type NodeInfoResponse struct { PeerSeniority []byte `protobuf:"bytes,4,opt,name=peer_seniority,json=peerSeniority,proto3" json:"peer_seniority,omitempty"` RunningWorkers uint32 `protobuf:"varint,5,opt,name=running_workers,json=runningWorkers,proto3" json:"running_workers,omitempty"` AllocatedWorkers uint32 `protobuf:"varint,6,opt,name=allocated_workers,json=allocatedWorkers,proto3" json:"allocated_workers,omitempty"` + PatchVersion []byte `protobuf:"bytes,7,opt,name=patch_version,json=patchVersion,proto3" json:"patch_version,omitempty"` } func (x *NodeInfoResponse) Reset() 
{ @@ -447,6 +448,13 @@ func (x *NodeInfoResponse) GetAllocatedWorkers() uint32 { return 0 } +func (x *NodeInfoResponse) GetPatchVersion() []byte { + if x != nil { + return x.PatchVersion + } + return nil +} + type WorkerInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1948,7 +1956,7 @@ var file_node_proto_rawDesc = []byte{ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, - 0x6f, 0x22, 0xe1, 0x01, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x6f, 0x22, 0x86, 0x02, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, @@ -1962,271 +1970,273 @@ var file_node_proto_rawDesc = []byte{ 0x67, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x73, 0x22, 0x8f, 0x01, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x10, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x5a, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, - 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x22, 0x6e, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x79, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x22, 0x2f, 0x0a, 0x09, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x72, - 0x65, 0x66, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x22, 0x41, 0x0a, 0x07, 0x4b, 0x65, 0x79, 
0x52, 0x69, 0x6e, 0x67, 0x12, - 0x36, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, - 0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x4f, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, - 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, - 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x69, - 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xdd, - 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x42, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, - 0x65, 
0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x75, - 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, - 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x5a, - 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, - 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, - 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, - 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x30, 0x0a, 0x14, 0x4f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, - 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x68, 0x0a, 0x0f, - 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x23, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xce, 0x01, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x5e, 0x0a, 0x12, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, - 0x48, 0x00, 0x52, 0x11, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x55, 0x0a, 0x10, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, - 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8f, 0x01, 0x0a, 0x0a, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, + 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 
0x62, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x5a, 0x0a, 0x12, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, + 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x6e, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2f, 0x0a, 0x09, 0x49, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x41, 0x0a, 0x07, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 
0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x4f, 0x0a, 0x0e, + 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, + 0x0c, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, + 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, - 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6d, 0x70, - 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x09, 0x0a, 0x07, - 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x04, 0x43, 0x6f, 0x69, 0x6e, 0x12, - 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x05, 0x6f, - 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 
0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x7c, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x31, 0x0a, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x69, - 0x6e, 0x52, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, - 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x22, 0x88, 0x03, 0x0a, 0x17, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, - 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, - 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, - 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x20, - 0x0a, 0x0c, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, - 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, - 0x12, 0x29, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6d, 0x61, - 0x73, 0x6b, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x22, - 0x8c, 0x06, 0x0a, 0x1e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, - 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 
0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, - 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x6f, 0x4f, 0x6e, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, - 0x64, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4f, 0x6e, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x5f, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x6f, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, - 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x56, - 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x26, - 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x6f, 0x43, 0x6f, 0x69, 0x6e, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, - 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x11, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x43, 
0x6f, 0x69, 0x6e, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6f, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x4d, 0x61, 0x73, 0x6b, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x12, 0x36, 0x0a, 0x17, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x15, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, 0x6f, 0x5f, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x74, - 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b, 0x72, 0x65, 0x66, 0x75, 0x6e, - 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x72, 0x65, - 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x1f, 0x72, 0x65, 0x66, 0x75, 0x6e, - 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x1c, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1e, - 0x0a, 0x0a, 
0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4d, - 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xa6, 0x02, - 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0c, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x0b, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, - 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6a, 0x0a, 0x14, 0x70, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x28, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, - 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, - 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, - 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x69, 0x64, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x22, 0x35, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 
0x52, 0x08, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9c, 0x04, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, - 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, - 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, + 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, + 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x22, 0xdd, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x42, 0x0a, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x26, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, + 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, + 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, + 0x61, 0x74, 0x61, 0x22, 0x5a, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, + 0x30, 0x0a, 0x14, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 
0x65, 0x64, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x68, 0x0a, 0x0f, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x69, 0x6d, 0x70, + 0x6c, 0x69, 0x63, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xce, 0x01, 0x0a, 0x0a, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x5e, 0x0a, 0x12, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x24, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, - 0x6f, 
0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xe4, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, - 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x52, 0x65, - 0x73, 0x70, 0x61, 0x77, 0x6e, 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2f, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, + 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x66, 0x48, 0x00, 
0x52, 0x11, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x55, 0x0a, 0x10, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, + 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, + 0x52, 0x0f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x04, + 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x39, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x66, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x7c, 0x0a, 0x0a, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x31, 0x0a, 0x04, 0x63, 0x6f, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 
0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, + 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x88, 0x03, 0x0a, 0x17, 0x4d, 0x61, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 
0x6d, 0x61, 0x73, 0x6b, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x4b, 0x65, 0x79, 0x22, 0x8c, 0x06, 0x0a, 0x1e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, + 0x74, 0x6f, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x13, + 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6f, 0x6e, 
0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x72, 0x65, 0x66, 0x75, 0x6e, + 0x64, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x74, + 0x6f, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x6f, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x72, + 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x72, 0x65, + 0x66, 0x75, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x6f, + 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, + 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, + 0x6f, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x65, 0x66, 0x75, 0x6e, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 
0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3d, 0x0a, + 0x1b, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x18, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b, + 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x19, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x1f, + 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x4d, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, + 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x22, 0xa6, 0x02, 
0x0a, 0x1a, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, + 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x69, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, + 0x62, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x0b, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x6a, 0x0a, 0x14, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x28, 0x0a, 0x0e, 0x52, + 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, + 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, + 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x65, + 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x35, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9c, 0x04, + 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, + 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, - 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, - 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 
0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x0d, 0x47, + 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, + 0x12, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xe4, 0x01, 0x0a, + 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x5c, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, 0x74, 0x0a, + 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, + 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/protobufs/node.proto b/protobufs/node.proto index 962c770..419c7fd 100644 --- a/protobufs/node.proto +++ b/protobufs/node.proto @@ -54,6 +54,7 @@ message NodeInfoResponse { bytes peer_seniority = 4; uint32 running_workers = 5; uint32 allocated_workers = 6; + bytes patch_version = 7; } message WorkerInfo { diff --git a/types/worker/manager.go b/types/worker/manager.go index 8c0e51d..b9523de 100644 --- a/types/worker/manager.go +++ b/types/worker/manager.go @@ -11,6 +11,7 @@ type WorkerManager interface { Stop() error AllocateWorker(coreId uint, filter []byte) error DeallocateWorker(coreId uint) error + CheckWorkersConnected() ([]uint, error) GetWorkerIdByFilter(filter []byte) (uint, error) GetFilterByWorkerId(coreId uint) ([]byte, error) RegisterWorker(info *store.WorkerInfo) error