diff --git a/consensus/.mockery.yaml b/consensus/.mockery.yaml new file mode 100644 index 0000000..9ba827f --- /dev/null +++ b/consensus/.mockery.yaml @@ -0,0 +1,18 @@ +dir: "{{.InterfaceDir}}/mock" +outpkg: "mock" +filename: "{{.InterfaceName | snakecase}}.go" +mockname: "{{.InterfaceName}}" + +all: True +with-expecter: False +include-auto-generated: False +disable-func-mocks: True +fail-on-missing: True +disable-version-string: True +resolve-type-alias: False + +packages: + source.quilibrium.com/quilibrium/monorepo/consensus: + config: + dir: "mocks" + outpkg: "mocks" diff --git a/consensus/README.md b/consensus/README.md index ac9cb30..e75035f 100644 --- a/consensus/README.md +++ b/consensus/README.md @@ -1,300 +1,4 @@ # Consensus State Machine -A generic, extensible state machine implementation for building Byzantine Fault -Tolerant (BFT) consensus protocols. This library provides a framework for -implementing round-based consensus algorithms with cryptographic proofs. - -## Overview - -The state machine manages consensus engine state transitions through a -well-defined set of states and events. It supports generic type parameters to -allow different implementations of state data, votes, peer identities, and -collected mutations. - -## Features - -- **Generic Implementation**: Supports custom types for state data, votes, peer - IDs, and collected data -- **Byzantine Fault Tolerance**: Provides BFT consensus with < 1/3 byzantine - nodes, flexible to other probabilistic BFT implementations -- **Round-based Consensus**: Implements a round-based state transition pattern -- **Pluggable Providers**: Extensible through provider interfaces for different - consensus behaviors -- **Event-driven Architecture**: State transitions triggered by events with - optional guard conditions -- **Concurrent Safe**: Thread-safe implementation with proper mutex usage -- **Timeout Support**: Configurable timeouts for each state with automatic - transitions -- **Transition Listeners**: Observable state transitions for monitoring and - debugging - -## Core Concepts - -### States - -The state machine progresses through the following states: - -1. **StateStopped**: Initial state, engine is not running -2. **StateStarting**: Engine is initializing -3. **StateLoading**: Loading data and syncing with network -4. **StateCollecting**: Collecting data/mutations for consensus round -5. **StateLivenessCheck**: Checking peer liveness before proving -6. **StateProving**: Generating cryptographic proof (leader only) -7. **StatePublishing**: Publishing proposed state -8. **StateVoting**: Voting on proposals -9. **StateFinalizing**: Finalizing consensus round -10. **StateVerifying**: Verifying and publishing results -11. **StateStopping**: Engine is shutting down - -### Events - -Events trigger state transitions: -- `EventStart`, `EventStop`: Lifecycle events -- `EventSyncComplete`: Synchronization finished -- `EventCollectionDone`: Mutation collection complete -- `EventLivenessCheckReceived`: Peer liveness confirmed -- `EventProverSignal`: Leader selection complete -- `EventProofComplete`: Proof generation finished -- `EventProposalReceived`: New proposal received -- `EventVoteReceived`: Vote received -- `EventQuorumReached`: Voting quorum achieved -- `EventConfirmationReceived`: State confirmation received -- And more... - -### Type Constraints - -All generic type parameters must implement the `Unique` interface: - -```go -type Unique interface { - Identity() Identity // Returns a unique string identifier -} -``` - -## Provider Interfaces - -### SyncProvider - -Handles initial state synchronization: - -```go -type SyncProvider[StateT Unique] interface { - Synchronize( - existing *StateT, - ctx context.Context, - ) (<-chan *StateT, <-chan error) -} -``` - -### VotingProvider - -Manages the voting process: - -```go -type VotingProvider[StateT Unique, VoteT Unique, PeerIDT Unique] interface { - SendProposal(proposal *StateT, ctx context.Context) error - DecideAndSendVote( - proposals map[Identity]*StateT, - ctx context.Context, - ) (PeerIDT, *VoteT, error) - IsQuorum(votes map[Identity]*VoteT, ctx context.Context) (bool, error) - FinalizeVotes( - proposals map[Identity]*StateT, - votes map[Identity]*VoteT, - ctx context.Context, - ) (*StateT, PeerIDT, error) - SendConfirmation(finalized *StateT, ctx context.Context) error -} -``` - -### LeaderProvider - -Handles leader selection and proof generation: - -```go -type LeaderProvider[ - StateT Unique, - PeerIDT Unique, - CollectedT Unique, -] interface { - GetNextLeaders(prior *StateT, ctx context.Context) ([]PeerIDT, error) - ProveNextState( - prior *StateT, - collected CollectedT, - ctx context.Context, - ) (*StateT, error) -} -``` - -### LivenessProvider - -Manages peer liveness checks: - -```go -type LivenessProvider[ - StateT Unique, - PeerIDT Unique, - CollectedT Unique, -] interface { - Collect(ctx context.Context) (CollectedT, error) - SendLiveness(prior *StateT, collected CollectedT, ctx context.Context) error -} -``` - -## Usage - -### Basic Setup - -```go -// Define your types implementing Unique -type MyState struct { - Round uint64 - Hash string -} -func (s MyState) Identity() string { return s.Hash } - -type MyVote struct { - Voter string - Value bool -} -func (v MyVote) Identity() string { return v.Voter } - -type MyPeerID struct { - ID string -} -func (p MyPeerID) Identity() string { return p.ID } - -type MyCollected struct { - Data []byte -} -func (c MyCollected) Identity() string { return string(c.Data) } - -// Implement providers -syncProvider := &MySyncProvider{} -votingProvider := &MyVotingProvider{} -leaderProvider := &MyLeaderProvider{} -livenessProvider := &MyLivenessProvider{} - -// Create state machine -sm := consensus.NewStateMachine[MyState, MyVote, MyPeerID, MyCollected]( - MyPeerID{ID: "node1"}, // This node's ID - &MyState{Round: 0, Hash: "genesis"}, // Initial state - true, // shouldEmitReceiveEventsOnSends - 3, // minimumProvers - syncProvider, - votingProvider, - leaderProvider, - livenessProvider, - nil, // Optional trace logger -) - -// Add transition listener -sm.AddListener(&MyTransitionListener{}) - -// Start the state machine -if err := sm.Start(); err != nil { - log.Fatal(err) -} - -// Receive external events -sm.ReceiveProposal(peer, proposal) -sm.ReceiveVote(voter, vote) -sm.ReceiveLivenessCheck(peer, collected) -sm.ReceiveConfirmation(peer, confirmation) - -// Stop the state machine -if err := sm.Stop(); err != nil { - log.Fatal(err) -} -``` - -### Implementing Providers - -See the `example/generic_consensus_example.go` for a complete working example -with mock provider implementations. - -## State Flow - -The typical consensus flow: - -1. **Start** → **Starting** → **Loading** -2. **Loading**: Synchronize with network -3. **Collecting**: Gather mutations/changes -4. **LivenessCheck**: Verify peer availability -5. **Proving**: Leader generates proof -6. **Publishing**: Leader publishes proposal -7. **Voting**: All nodes vote on proposals -8. **Finalizing**: Aggregate votes and determine outcome -9. **Verifying**: Confirm and apply state changes -10. Loop back to **Collecting** for next round - -## Configuration - -### Constructor Parameters - -- `id`: This node's peer ID -- `initialState`: Starting state (can be nil) -- `shouldEmitReceiveEventsOnSends`: Whether to emit receive events for own - messages -- `minimumProvers`: Minimum number of active provers required -- `traceLogger`: Optional logger for debugging state transitions - -### State Timeouts - -Each state can have a configured timeout that triggers an automatic transition: - -- **Starting**: 1 second → `EventInitComplete` -- **Loading**: 10 minutes → `EventSyncComplete` -- **Collecting**: 1 second → `EventCollectionDone` -- **LivenessCheck**: 1 second → `EventLivenessTimeout` -- **Proving**: 120 seconds → `EventPublishTimeout` -- **Publishing**: 1 second → `EventPublishTimeout` -- **Voting**: 10 seconds → `EventVotingTimeout` -- **Finalizing**: 1 second → `EventAggregationDone` -- **Verifying**: 1 second → `EventVerificationDone` -- **Stopping**: 30 seconds → `EventCleanupComplete` - -## Thread Safety - -The state machine is thread-safe. All public methods properly handle concurrent -access through mutex locks. State behaviors run in separate goroutines with -proper cancellation support. - -## Error Handling - -- Provider errors are logged but don't crash the state machine -- The state machine continues operating and may retry operations -- Critical errors during state transitions are returned to callers -- Use the `TraceLogger` interface for debugging - -## Best Practices - -1. **Message Isolation**: When implementing providers, always deep-copy data - before sending to prevent shared state between state machine and other - handlers -2. **Nil Handling**: Provider implementations should handle nil prior states - gracefully -3. **Context Usage**: Respect context cancellation in long-running operations -4. **Quorum Size**: Set appropriate quorum size based on your network (typically - 2f+1 for f failures) -5. **Timeout Configuration**: Adjust timeouts based on network conditions and - proof generation time - -## Example - -See `example/generic_consensus_example.go` for a complete working example -demonstrating: -- Mock provider implementations -- Multi-node consensus network -- Byzantine node behavior -- Message passing between nodes -- State transition monitoring - -## Testing - -The package includes comprehensive tests in `state_machine_test.go` covering: -- State transitions -- Event handling -- Concurrent operations -- Byzantine scenarios -- Timeout behavior +Consensus State Machine is being swapped out with a fork of the HotStuff implementation by Flow. +This will be updated with appropriate license details when the fork work has finished. diff --git a/consensus/backoff_timer.go b/consensus/backoff_timer.go new file mode 100644 index 0000000..1da35a5 --- /dev/null +++ b/consensus/backoff_timer.go @@ -0,0 +1,77 @@ +package consensus + +import ( + "context" + "time" +) + +type BackoffTimer struct { + timeoutCh chan time.Time + cancel context.CancelFunc + fail uint64 +} + +func NewBackoffTimer() *BackoffTimer { + t := make(chan time.Time) + close(t) + + return &BackoffTimer{ + timeoutCh: t, + cancel: func() {}, + fail: 0, + } +} + +func (t *BackoffTimer) TimeoutCh() <-chan time.Time { + return t.timeoutCh +} + +func (t *BackoffTimer) Start( + ctx context.Context, +) (start, end time.Time) { + t.cancel() + + t.timeoutCh = make(chan time.Time) + ctx, cancelFn := context.WithCancel(ctx) + t.cancel = cancelFn + + go rebroadcastTimeout(ctx, t.timeoutCh) + + start = time.Now().UTC() + end = start.Add(time.Duration(min(t.fail, 10)+10) * time.Second) + return start, end +} + +func (t *BackoffTimer) ReceiveTimeout() { + if t.fail < 10 { + t.fail++ + } +} + +func (t *BackoffTimer) ReceiveSuccess() { + if t.fail > 0 { + t.fail-- + } +} + +func rebroadcastTimeout(ctx context.Context, timeoutCh chan<- time.Time) { + timeout := time.NewTimer(20 * time.Second) + select { + case t := <-timeout.C: + timeoutCh <- t + case <-ctx.Done(): + timeout.Stop() + return + } + + rebroadcast := time.NewTicker(1 * time.Second) + for { + select { + case t := <-rebroadcast.C: + timeoutCh <- t + case <-ctx.Done(): + rebroadcast.Stop() + return + } + } +} diff --git a/consensus/consensus_committee.go b/consensus/consensus_committee.go new file mode 100644 index 0000000..85cc776 --- /dev/null +++ b/consensus/consensus_committee.go @@ -0,0 +1,154 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// A committee provides a subset of the protocol.State, which is restricted to +// exactly those nodes that participate in the current HotStuff instance: the +// state of all legitimate HotStuff participants for the specified rank. +// Legitimate HotStuff participants have NON-ZERO WEIGHT. +// +// For the purposes of validating votes, timeouts, quorum certificates, and +// timeout certificates we consider a committee which is static over the course +// of an epoch. Although committee members may be ejected, or have their weight +// change during an epoch, we ignore these changes. For these purposes we use +// the Replicas and *ByEpoch methods. +// +// When validating proposals, we take into account changes to the committee +// during the course of an epoch. In particular, if a node is ejected, we will +// immediately reject all future proposals from that node. For these purposes we +// use the DynamicCommittee and *ByState methods. + +// Replicas defines the consensus committee for the purposes of validating +// votes, timeouts, quorum certificates, and timeout certificates. Any consensus +// committee member who was authorized to contribute to consensus AT THE +// BEGINNING of the epoch may produce valid votes and timeouts for the entire +// epoch, even if they are later ejected. So for validating votes/timeouts we +// use *ByEpoch methods. +// +// Since the voter committee is considered static over an epoch: +// - we can query identities by rank +// - we don't need the full state ancestry prior to validating messages +type Replicas interface { + + // LeaderForRank returns the identity of the leader for a given rank. + // CAUTION: per liveness requirement of HotStuff, the leader must be + // fork-independent. Therefore, a node retains its proposer rank + // slots even if it is slashed. Its proposal is simply considered + // invalid, as it is not from a legitimate participant. + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if no epoch containing the given rank is + // known + LeaderForRank(rank uint64) (models.Identity, error) + + // QuorumThresholdForRank returns the minimum total weight for a supermajority + // at the given rank. This weight threshold is computed using the total weight + // of the initial committee and is static over the course of an epoch. + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if no epoch containing the given rank is + // known + QuorumThresholdForRank(rank uint64) (uint64, error) + + // TimeoutThresholdForRank returns the minimum total weight of observed + // timeout states required to safely timeout for the given rank. This weight + // threshold is computed using the total weight of the initial committee and + // is static over the course of an epoch. + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if no epoch containing the given rank is + // known + TimeoutThresholdForRank(rank uint64) (uint64, error) + + // Self returns our own node identifier. + // TODO: ultimately, the own identity of the node is necessary for signing. + // Ideally, we would move the method for checking whether an Identifier + // refers to this node to the signer. This would require some + // refactoring of EventHandler (postponed to later) + Self() models.Identity + + // IdentitiesByRank returns a list of the legitimate HotStuff participants + // for the epoch given by the input rank. + // The returned list of HotStuff participants: + // - contains nodes that are allowed to submit votes or timeouts within the + // given epoch (un-ejected, non-zero weight at the beginning of the epoch) + // - is ordered in the canonical order + // - contains no duplicates. + // + // CAUTION: DO NOT use this method for validating state proposals. + // + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if no epoch containing the given rank is + // known + // + IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) + + // IdentityByEpoch returns the full Identity for specified HotStuff + // participant. The node must be a legitimate HotStuff participant with + // NON-ZERO WEIGHT at the specified state. + // + // ERROR conditions: + // - model.InvalidSignerError if participantID does NOT correspond to an + // authorized HotStuff participant at the specified state. + // + // Returns the following expected errors for invalid inputs: + // - model.ErrRankUnknown if no epoch containing the given rank is + // known + // + IdentityByRank( + rank uint64, + participantID models.Identity, + ) (models.WeightedIdentity, error) +} + +// DynamicCommittee extends Replicas to provide the consensus committee for the +// purposes of validating proposals. The proposer committee reflects +// state-to-state changes in the identity table to support immediately rejecting +// proposals from nodes after they are ejected. For validating proposals, we use +// *ByState methods. +// +// Since the proposer committee can change at any state: +// - we query by state ID +// - we must have incorporated the full state ancestry prior to validating +// messages +type DynamicCommittee interface { + Replicas + + // IdentitiesByState returns a list of the legitimate HotStuff participants + // for the given state. The returned list of HotStuff participants: + // - contains nodes that are allowed to submit proposals, votes, and + // timeouts (un-ejected, non-zero weight at current state) + // - is ordered in the canonical order + // - contains no duplicates. + // + // ERROR conditions: + // - state.ErrUnknownSnapshotReference if the stateID is for an unknown state + IdentitiesByState(stateID models.Identity) ([]models.WeightedIdentity, error) + + // IdentityByState returns the full Identity for specified HotStuff + // participant. The node must be a legitimate HotStuff participant with + // NON-ZERO WEIGHT at the specified state. + // ERROR conditions: + // - model.InvalidSignerError if participantID does NOT correspond to an + // authorized HotStuff participant at the specified state. + // - state.ErrUnknownSnapshotReference if the stateID is for an unknown state + IdentityByState( + stateID models.Identity, + participantID models.Identity, + ) (*models.WeightedIdentity, error) +} + +// StateSignerDecoder defines how to convert the ParentSignerIndices field +// within a particular state header to the identifiers of the nodes which signed +// the state. +type StateSignerDecoder[StateT models.Unique] interface { + // DecodeSignerIDs decodes the signer indices from the given state header into + // full node IDs. + // Note: A state header contains a quorum certificate for its parent, which + // proves that the consensus committee has reached agreement on validity of + // parent state. Consequently, the returned IdentifierList contains the + // consensus participants that signed the parent state. + // Expected Error returns during normal operations: + // - signature.InvalidSignerIndicesError if signer indices included in the + // header do not encode a valid subset of the consensus committee + DecodeSignerIDs( + state *models.State[StateT], + ) ([]models.WeightedIdentity, error) +} diff --git a/consensus/consensus_consumer.go b/consensus/consensus_consumer.go new file mode 100644 index 0000000..19aeec0 --- /dev/null +++ b/consensus/consensus_consumer.go @@ -0,0 +1,453 @@ +package consensus + +import ( + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalViolationConsumer consumes outbound notifications about +// HotStuff-protocol violations. Such notifications are produced by the active +// consensus participants and consensus follower. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type ProposalViolationConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + // OnInvalidStateDetected notifications are produced by components that have + // detected that a state proposal is invalid and need to report it. Most of + // the time such state can be detected by calling Validator.ValidateProposal. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) + + // OnDoubleProposeDetected notifications are produced by the Finalization + // Logic whenever a double state proposal (equivocation) was detected. + // Equivocation occurs when the same leader proposes two different states for + // the same rank. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnDoubleProposeDetected(*models.State[StateT], *models.State[StateT]) +} + +// VoteAggregationViolationConsumer consumes outbound notifications about +// HotStuff-protocol violations specifically invalid votes during processing. +// Such notifications are produced by the Vote Aggregation logic. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type VoteAggregationViolationConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + // OnDoubleVotingDetected notifications are produced by the Vote Aggregation + // logic whenever a double voting (same voter voting for different states at + // the same rank) was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnDoubleVotingDetected(*VoteT, *VoteT) + + // OnInvalidVoteDetected notifications are produced by the Vote Aggregation + // logic whenever an invalid vote was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) + + // OnVoteForInvalidStateDetected notifications are produced by the Vote + // Aggregation logic whenever vote for invalid proposal was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnVoteForInvalidStateDetected( + vote *VoteT, + invalidProposal *models.SignedProposal[StateT, VoteT], + ) +} + +// TimeoutAggregationViolationConsumer consumes outbound notifications about +// Active Pacemaker violations specifically invalid timeouts during processing. +// Such notifications are produced by the Timeout Aggregation logic. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type TimeoutAggregationViolationConsumer[VoteT models.Unique] interface { + // OnDoubleTimeoutDetected notifications are produced by the Timeout + // Aggregation logic whenever a double timeout (same replica producing two + // different timeouts at the same rank) was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnDoubleTimeoutDetected( + *models.TimeoutState[VoteT], + *models.TimeoutState[VoteT], + ) + + // OnInvalidTimeoutDetected notifications are produced by the Timeout + // Aggregation logic whenever an invalid timeout was detected. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) +} + +// FinalizationConsumer consumes outbound notifications produced by the logic +// tracking forks and finalization. Such notifications are produced by the +// active consensus participants, and generally potentially relevant to the +// larger node. The notifications are emitted in the order in which the +// finalization algorithm makes the respective steps. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type FinalizationConsumer[StateT models.Unique] interface { + // OnStateIncorporated notifications are produced by the Finalization Logic + // whenever a state is incorporated into the consensus state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnStateIncorporated(*models.State[StateT]) + + // OnFinalizedState notifications are produced by the Finalization Logic + // whenever a state has been finalized. They are emitted in the order the + // states are finalized. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnFinalizedState(*models.State[StateT]) +} + +// ParticipantConsumer consumes outbound notifications produced by consensus +// participants actively proposing states, voting, collecting & aggregating +// votes to QCs, and participating in the pacemaker (sending timeouts, +// collecting & aggregating timeouts to TCs). +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type ParticipantConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + // OnEventProcessed notifications are produced by the EventHandler when it is + // done processing and hands control back to the EventLoop to wait for the + // next event. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnEventProcessed() + + // OnStart notifications are produced by the EventHandler when it starts + // blocks recovery and prepares for handling incoming events from EventLoop. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnStart(currentRank uint64) + + // OnReceiveProposal notifications are produced by the EventHandler when it + // starts processing a state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnReceiveProposal( + currentRank uint64, + proposal *models.SignedProposal[StateT, VoteT], + ) + + // OnReceiveQuorumCertificate notifications are produced by the EventHandler + // when it starts processing a QuorumCertificate [QC] constructed by the + // node's internal vote aggregator. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) + + // OnReceiveTimeoutCertificate notifications are produced by the EventHandler + // when it starts processing a TimeoutCertificate [TC] constructed by the + // node's internal timeout aggregator. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) + + // OnPartialTimeoutCertificate notifications are produced by the EventHandler + // when it starts processing partial TC constructed by local timeout + // aggregator. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnPartialTimeoutCertificate( + currentRank uint64, + partialTimeoutCertificate *PartialTimeoutCertificateCreated, + ) + + // OnLocalTimeout notifications are produced by the EventHandler when it + // reacts to expiry of round duration timer. Such a notification indicates + // that the Pacemaker's timeout was processed by the system. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnLocalTimeout(currentRank uint64) + + // OnRankChange notifications are produced by Pacemaker when it transitions to + // a new rank based on processing a QC or TC. The arguments specify the + // oldRank (first argument), and the newRank to which the Pacemaker + // transitioned (second argument). + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnRankChange(oldRank, newRank uint64) + + // OnQuorumCertificateTriggeredRankChange notifications are produced by + // Pacemaker when it moves to a new rank based on processing a QC. The + // arguments specify the qc (first argument), which triggered the rank change, + // and the newRank to which the Pacemaker transitioned (second argument). + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnQuorumCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + qc models.QuorumCertificate, + ) + + // OnTimeoutCertificateTriggeredRankChange notifications are produced by + // Pacemaker when it moves to a new rank based on processing a TC. The + // arguments specify the tc (first argument), which triggered the rank change, + // and the newRank to which the Pacemaker transitioned (second argument). + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnTimeoutCertificateTriggeredRankChange( + oldRank uint64, + newRank uint64, + tc models.TimeoutCertificate, + ) + + // OnStartingTimeout notifications are produced by Pacemaker. Such a + // notification indicates that the Pacemaker is now waiting for the system to + // (receive and) process states or votes. The specific timeout type is + // contained in the TimerInfo. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnStartingTimeout(startTime, endTime time.Time) + + // OnCurrentRankDetails notifications are produced by the EventHandler during + // the course of a rank with auxiliary information. These notifications are + // generally not produced for all ranks (for example skipped ranks). These + // notifications are guaranteed to be produced for all ranks we enter after + // fully processing a message. + // Example 1: + // - We are in rank 8. We process a QC with rank 10, causing us to enter + // rank 11. + // - Then this notification will be produced for rank 11. + // Example 2: + // - We are in rank 8. We process a proposal with rank 10, which contains a + // TC for rank 9 and TC.NewestQC for rank 8. + // - The QC would allow us to enter rank 9 and the TC would allow us to + // enter rank 10, so after fully processing the message we are in rank 10. + // - Then this notification will be produced for rank 10, but not rank 9 + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnCurrentRankDetails( + currentRank, finalizedRank uint64, + currentLeader models.Identity, + ) +} + +// VoteCollectorConsumer consumes outbound notifications produced by HotStuff's +// vote aggregation component. These events are primarily intended for the +// HotStuff-internal state machine (EventHandler), but might also be relevant to +// the larger node in which HotStuff is running. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type VoteCollectorConsumer[VoteT models.Unique] interface { + // OnQuorumCertificateConstructedFromVotes notifications are produced by the + // VoteAggregator component, whenever it constructs a QC from votes. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnQuorumCertificateConstructedFromVotes(models.QuorumCertificate) + + // OnVoteProcessed notifications are produced by the Vote Aggregation logic, + // each time we successfully ingest a valid vote. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnVoteProcessed(vote *VoteT) +} + +// TimeoutCollectorConsumer consumes outbound notifications produced by +// HotStuff's timeout aggregation component. These events are primarily intended +// for the HotStuff-internal state machine (EventHandler), but might also be +// relevant to the larger node in which HotStuff is running. +// +// Caution: the events are not strictly ordered by increasing ranks! The +// notifications are emitted by concurrent processing logic. Over larger time +// scales, the emitted events are for statistically increasing ranks. However, +// on short time scales there are _no_ monotonicity guarantees w.r.t. the +// events' ranks. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type TimeoutCollectorConsumer[VoteT models.Unique] interface { + // OnTimeoutCertificateConstructedFromTimeouts notifications are produced by + // the TimeoutProcessor component, whenever it constructs a TC based on + // TimeoutStates from a supermajority of consensus participants. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnTimeoutCertificateConstructedFromTimeouts( + certificate models.TimeoutCertificate, + ) + + // OnPartialTimeoutCertificateCreated notifications are produced by the + // TimeoutProcessor component, whenever it collected TimeoutStates from a + // superminority of consensus participants for a specific rank. Along with the + // rank, it reports the newest QC and TC (for previous rank) discovered in + // process of timeout collection. Per convention, the newest QC is never nil, + // while the TC for the previous rank might be nil. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, + ) + + // OnNewQuorumCertificateDiscovered notifications are produced by the + // TimeoutCollector component, whenever it discovers new QC included in + // timeout state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) + + // OnNewTimeoutCertificateDiscovered notifications are produced by the + // TimeoutCollector component, whenever it discovers new TC included in + // timeout state. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) + + // OnTimeoutProcessed notifications are produced by the Timeout Aggregation + // logic, each time we successfully ingest a valid timeout. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) +} + +// CommunicatorConsumer consumes outbound notifications produced by HotStuff and +// it's components. Notifications allow the HotStuff core algorithm to +// communicate with the other actors of the consensus process. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type CommunicatorConsumer[StateT models.Unique, VoteT models.Unique] interface { + // OnOwnVote notifies about intent to send a vote for the given parameters to + // the specified recipient. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnOwnVote(vote *VoteT, recipientID models.Identity) + + // OnOwnTimeout notifies about intent to broadcast the given timeout + // state to all actors of the consensus process. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; and must handle + // repetition of the same events (with some processing overhead). + OnOwnTimeout(timeout *models.TimeoutState[VoteT]) + + // OnOwnProposal notifies about intent to broadcast the given state proposal + // to all actors of the consensus process. delay is to hold the proposal + // before broadcasting it. Useful to control the state production rate. + // Prerequisites: + // Implementation must be concurrency safe; Non-blocking; + // and must handle repetition of the same events (with some processing + // overhead). + OnOwnProposal( + proposal *models.SignedProposal[StateT, VoteT], + targetPublicationTime time.Time, + ) +} + +// FollowerConsumer consumes outbound notifications produced by consensus +// followers. It is a subset of the notifications produced by consensus +// participants. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type FollowerConsumer[StateT models.Unique, VoteT models.Unique] interface { + ProposalViolationConsumer[StateT, VoteT] + FinalizationConsumer[StateT] +} + +// Consumer consumes outbound notifications produced by consensus participants. +// Notifications are consensus-internal state changes which are potentially +// relevant to the larger node in which HotStuff is running. The notifications +// are emitted in the order in which the HotStuff algorithm makes the respective +// steps. +// +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type Consumer[StateT models.Unique, VoteT models.Unique] interface { + FollowerConsumer[StateT, VoteT] + CommunicatorConsumer[StateT, VoteT] + ParticipantConsumer[StateT, VoteT] +} + +// VoteAggregationConsumer consumes outbound notifications produced by Vote +// Aggregation logic. It is a subset of the notifications produced by consensus +// participants. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type VoteAggregationConsumer[ + StateT models.Unique, + VoteT models.Unique, +] interface { + VoteAggregationViolationConsumer[StateT, VoteT] + VoteCollectorConsumer[VoteT] +} + +// TimeoutAggregationConsumer consumes outbound notifications produced by Vote +// Aggregation logic. It is a subset of the notifications produced by consensus +// participants. +// Implementations must: +// - be concurrency safe +// - be non-blocking +// - handle repetition of the same events (with some processing overhead). +type TimeoutAggregationConsumer[VoteT models.Unique] interface { + TimeoutAggregationViolationConsumer[VoteT] + TimeoutCollectorConsumer[VoteT] +} diff --git a/consensus/consensus_events.go b/consensus/consensus_events.go new file mode 100644 index 0000000..75094fe --- /dev/null +++ b/consensus/consensus_events.go @@ -0,0 +1,82 @@ +package consensus + +import ( + "context" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// PartialTimeoutCertificateCreated represents a notification emitted by the +// TimeoutProcessor component, whenever it has collected TimeoutStates from a +// superminority of consensus participants for a specific rank. Along with the +// rank, it reports the newest QuorumCertificate and TimeoutCertificate (for +// previous rank) discovered during timeout collection. Per convention, the +// newest QuorumCertificate is never nil, while the TimeoutCertificate for the +// previous rank might be nil. +type PartialTimeoutCertificateCreated struct { + Rank uint64 + NewestQuorumCertificate models.QuorumCertificate + PriorRankTimeoutCertificate models.TimeoutCertificate +} + +// EventHandler runs a state machine to process proposals, QuorumCertificate and +// local timeouts. Not concurrency safe. +type EventHandler[StateT models.Unique, VoteT models.Unique] interface { + // OnReceiveQuorumCertificate processes a valid quorumCertificate constructed + // by internal vote aggregator or discovered in TimeoutState. All inputs + // should be validated before feeding into this function. Assuming trusted + // data. No errors are expected during normal operation. + OnReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) error + + // OnReceiveTimeoutCertificate processes a valid timeoutCertificate + // constructed by internal timeout aggregator, discovered in TimeoutState or + // broadcast over the network. All inputs should be validated before feeding + // into this function. Assuming trusted data. No errors are expected during + // normal operation. + OnReceiveTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, + ) error + + // OnReceiveProposal processes a state proposal received from another HotStuff + // consensus participant. All inputs should be validated before feeding into + // this function. Assuming trusted data. No errors are expected during normal + // operation. + OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error + + // OnLocalTimeout handles a local timeout event by creating a + // models.TimeoutState and broadcasting it. No errors are expected during + // normal operation. + OnLocalTimeout() error + + // OnPartialTimeoutCertificateCreated handles notification produces by the + // internal timeout aggregator. If the notification is for the current rank, + // a corresponding models.TimeoutState is broadcast to the consensus + // committee. No errors are expected during normal operation. + OnPartialTimeoutCertificateCreated( + partialTimeoutCertificate *PartialTimeoutCertificateCreated, + ) error + + // TimeoutChannel returns a channel that sends a signal on timeout. + TimeoutChannel() <-chan time.Time + + // Start starts the event handler. No errors are expected during normal + // operation. + // CAUTION: EventHandler is not concurrency safe. The Start method must be + // executed by the same goroutine that also calls the other business logic + // methods, or concurrency safety has to be implemented externally. + Start(ctx context.Context) error +} + +// EventLoop performs buffer and processing of incoming proposals and QCs. +type EventLoop[StateT models.Unique, VoteT models.Unique] interface { + TimeoutCollectorConsumer[VoteT] + VoteCollectorConsumer[VoteT] + SubmitProposal(proposal *models.SignedProposal[StateT, VoteT]) +} + +// FollowerLoop only follows certified states, does not actively process the +// collection of proposals and QC/TCs. +type FollowerLoop[StateT models.Unique, VoteT models.Unique] interface { + AddCertifiedState(certifiedState *models.CertifiedState[StateT]) +} diff --git a/consensus/consensus_finalizer.go b/consensus/consensus_finalizer.go new file mode 100644 index 0000000..4c1fc2c --- /dev/null +++ b/consensus/consensus_finalizer.go @@ -0,0 +1,23 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// Finalizer is used by the consensus algorithm to inform other components for +// (such as the protocol state) about finalization of states. +// +// Since we have two different protocol states: one for the main consensus, +// the other for the collection cluster consensus, the Finalizer interface +// allows the two different protocol states to provide different implementations +// for updating its state when a state has been finalized. +// +// Updating the protocol state should always succeed when the data is +// consistent. However, in case the protocol state is corrupted, error should be +// returned and the consensus algorithm should halt. So the error returned from +// MakeFinal is for the protocol state to report exceptions. +type Finalizer interface { + + // MakeFinal will declare a state and all of its ancestors as finalized, which + // makes it an immutable part of the time reel. Returning an error indicates + // some fatal condition and will cause the finalization logic to terminate. + MakeFinal(stateID models.Identity) error +} diff --git a/consensus/consensus_forks.go b/consensus/consensus_forks.go new file mode 100644 index 0000000..4c09cb7 --- /dev/null +++ b/consensus/consensus_forks.go @@ -0,0 +1,106 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// FinalityProof represents a finality proof for a State. By convention, a +// FinalityProof is immutable. Finality in Jolteon/HotStuff is determined by the +// 2-chain rule: +// +// There exists a _certified_ state C, such that State.Rank + 1 = C.Rank +type FinalityProof[StateT models.Unique] struct { + State *models.State[StateT] + CertifiedChild *models.CertifiedState[StateT] +} + +// Forks maintains an in-memory data-structure of all states whose rank-number +// is larger or equal to the latest finalized state. The latest finalized state +// is defined as the finalized state with the largest rank number. When adding +// states, Forks automatically updates its internal state (including finalized +// states). Furthermore, states whose rank number is smaller than the latest +// finalized state are pruned automatically. +// +// PREREQUISITES: +// Forks expects that only states are added that can be connected to its latest +// finalized state (without missing interim ancestors). If this condition is +// violated, Forks will raise an error and ignore the state. +type Forks[StateT models.Unique] interface { + + // GetStatesForRank returns all known states for the given rank + GetStatesForRank(rank uint64) []*models.State[StateT] + + // GetState returns (*model.State, true) if the state with the specified + // id was found and (nil, false) otherwise. + GetState(stateID models.Identity) (*models.State[StateT], bool) + + // FinalizedRank returns the largest rank number where a finalized state is + // known + FinalizedRank() uint64 + + // FinalizedState returns the finalized state with the largest rank number + FinalizedState() *models.State[StateT] + + // FinalityProof returns the latest finalized state and a certified child from + // the subsequent rank, which proves finality. + // CAUTION: method returns (nil, false), when Forks has not yet finalized any + // states beyond the finalized root state it was initialized with. + FinalityProof() (*FinalityProof[StateT], bool) + + // AddValidatedState appends the validated state to the tree of pending + // states and updates the latest finalized state (if applicable). Unless the + // parent is below the pruning threshold (latest finalized rank), we require + // that the parent is already stored in Forks. Calling this method with + // previously processed states leaves the consensus state invariant (though, + // it will potentially cause some duplicate processing). + // Notes: + // - Method `AddCertifiedState(..)` should be used preferably, if a QC + // certifying `state` is already known. This is generally the case for the + // consensus follower. + // - Method `AddValidatedState` is intended for active consensus + // participants, which fully validate states (incl. payload), i.e. QCs are + // processed as part of validated proposals. + // + // Possible error returns: + // - model.MissingStateError if the parent does not exist in the forest (but + // is above the pruned rank). From the perspective of Forks, this error is + // benign (no-op). + // - model.InvalidStateError if the state is invalid (see + // `Forks.EnsureStateIsValidExtension` for details). From the perspective + // of Forks, this error is benign (no-op). However, we assume all states + // are fully verified, i.e. they should satisfy all consistency + // requirements. Hence, this error is likely an indicator of a bug in the + // compliance layer. + // - model.ByzantineThresholdExceededError if conflicting QCs or conflicting + // finalized states have been detected (violating a foundational consensus + // guarantees). This indicates that there are 1/3+ Byzantine nodes + // (weighted by seniority) in the network, breaking the safety guarantees + // of HotStuff (or there is a critical bug / data corruption). Forks + // cannot recover from this exception. + // - All other errors are potential symptoms of bugs or state corruption. + AddValidatedState(proposal *models.State[StateT]) error + + // AddCertifiedState appends the given certified state to the tree of pending + // states and updates the latest finalized state (if finalization progressed). + // Unless the parent is below the pruning threshold (latest finalized rank), + // we require that the parent is already stored in Forks. Calling this method + // with previously processed states leaves the consensus state invariant + // (though, it will potentially cause some duplicate processing). + // + // Possible error returns: + // - model.MissingStateError if the parent does not exist in the forest (but + // is above the pruned rank). From the perspective of Forks, this error is + // benign (no-op). + // - model.InvalidStateError if the state is invalid (see + // `Forks.EnsureStateIsValidExtension` for details). From the perspective + // of Forks, this error is benign (no-op). However, we assume all states + // are fully verified, i.e. they should satisfy all consistency + // requirements. Hence, this error is likely an indicator of a bug in the + // compliance layer. + // - model.ByzantineThresholdExceededError if conflicting QCs or conflicting + // finalized states have been detected (violating a foundational consensus + // guarantees). This indicates that there are 1/3+ Byzantine nodes + // (weighted by seniority) in the network, breaking the safety guarantees + // of HotStuff (or there is a critical bug / data corruption). Forks + // cannot recover from this exception. + // - All other errors are potential symptoms of bugs or state corruption. + AddCertifiedState(certifiedState *models.CertifiedState[StateT]) error +} diff --git a/consensus/consensus_leader.go b/consensus/consensus_leader.go new file mode 100644 index 0000000..2edb80b --- /dev/null +++ b/consensus/consensus_leader.go @@ -0,0 +1,29 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LeaderProvider handles leader selection. State is provided, if relevant to +// the upstream consensus engine. +type LeaderProvider[ + StateT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] interface { + // GetNextLeaders returns a list of node indices, in priority order. Note that + // it is assumed that if no error is returned, GetNextLeaders should produce + // a non-empty list. If a list of size smaller than minimumProvers is + // provided, the liveness check will loop until the list is greater than that. + GetNextLeaders(ctx context.Context, prior *StateT) ([]PeerIDT, error) + // ProveNextState prepares a non-finalized new state from the prior, to be + // proposed and voted upon. Provided context may be canceled, should be used + // to halt long-running prover operations. + ProveNextState( + ctx context.Context, + filter []byte, + priorState models.Identity, + ) (*StateT, error) +} diff --git a/consensus/consensus_liveness.go b/consensus/consensus_liveness.go new file mode 100644 index 0000000..426f522 --- /dev/null +++ b/consensus/consensus_liveness.go @@ -0,0 +1,25 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LivenessProvider handles liveness announcements ahead of proving, to +// pre-emptively choose the next prover. In expected leader scenarios, this +// enables a peer to determine if an honest next prover is offline, so that it +// can publish the next state without waiting. +type LivenessProvider[ + StateT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] interface { + // Collect returns the collected mutation operations ahead of liveness + // announcements. + Collect(ctx context.Context) (CollectedT, error) + // SendLiveness announces liveness ahead of the next prover deterimination and + // subsequent proving. Provides prior state and collected mutation operations + // if relevant. + SendLiveness(ctx context.Context, prior *StateT, collected CollectedT) error +} diff --git a/consensus/consensus_pacemaker.go b/consensus/consensus_pacemaker.go new file mode 100644 index 0000000..02863c6 --- /dev/null +++ b/consensus/consensus_pacemaker.go @@ -0,0 +1,65 @@ +package consensus + +import ( + "context" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Pacemaker defines a standard set of methods for handling pacemaker behaviors +// in the consensus engine. +type Pacemaker interface { + ProposalDurationProvider + // CurrentRank returns the current rank + CurrentRank() uint64 + // LatestQuorumCertificate returns the latest quorum certificate seen. + LatestQuorumCertificate() models.QuorumCertificate + // PriorRankTimeoutCertificate returns the prior rank's timeout certificate, + // if it exists. + PriorRankTimeoutCertificate() models.TimeoutCertificate + // ReceiveQuorumCertificate handles an incoming quorum certificate, advancing + // to a new rank if applicable. + ReceiveQuorumCertificate( + quorumCertificate models.QuorumCertificate, + ) (*models.NextRank, error) + // ReceiveTimeoutCertificate handles an incoming timeout certificate, + // advancing to a new rank if applicable. + ReceiveTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, + ) (*models.NextRank, error) + // TimeoutCh provides a channel for timing out on the current rank. + TimeoutCh() <-chan time.Time + // Start starts the pacemaker, takes a cancellable context. + Start(ctx context.Context) error +} + +// ProposalDurationProvider generates the target publication time for state +// proposals. +type ProposalDurationProvider interface { + + // TargetPublicationTime is intended to be called by the EventHandler, + // whenever it wants to publish a new proposal. The event handler inputs + // - proposalRank: the rank it is proposing for, + // - timeRankEntered: the time when the EventHandler entered this rank + // - parentStateId: the ID of the parent state, which the EventHandler is + // building on + // TargetPublicationTime returns the time stamp when the new proposal should + // be broadcasted. For a given rank where we are the primary, suppose the + // actual time we are done building our proposal is P: + // - if P < TargetPublicationTime(..), then the EventHandler should wait + // until `TargetPublicationTime` to broadcast the proposal + // - if P >= TargetPublicationTime(..), then the EventHandler should + // immediately broadcast the proposal + // + // Note: Technically, our metrics capture the publication delay relative to + // this function's _latest_ call. Currently, the EventHandler is the only + // caller of this function, and only calls it once. + // + // Concurrency safe. + TargetPublicationTime( + proposalRank uint64, + timeRankEntered time.Time, + parentStateId models.Identity, + ) time.Time +} diff --git a/consensus/consensus_producer.go b/consensus/consensus_producer.go new file mode 100644 index 0000000..5c57f4d --- /dev/null +++ b/consensus/consensus_producer.go @@ -0,0 +1,25 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// StateProducer is responsible for producing new state proposals. It is a +// service component to HotStuff's main state machine (implemented in the +// EventHandler). The StateProducer's central purpose is to mediate concurrent +// signing requests to its embedded `hotstuff.SafetyRules` during state +// production. The actual work of producing a state proposal is delegated to the +// embedded `consensus.LeaderProvider`. +type StateProducer[StateT models.Unique, VoteT models.Unique] interface { + // MakeStateProposal builds a new HotStuff state proposal using the given + // rank, the given quorum certificate for its parent and [optionally] a + // timeout certificate for last rank (could be nil). + // Error Returns: + // - model.NoVoteError if it is not safe for us to vote (our proposal + // includes our vote) for this rank. This can happen if we have already + // proposed or timed out this rank. + // - generic error in case of unexpected failure + MakeStateProposal( + rank uint64, + qc models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, + ) (*models.SignedProposal[StateT, VoteT], error) +} diff --git a/consensus/consensus_safety_rules.go b/consensus/consensus_safety_rules.go new file mode 100644 index 0000000..1148bd2 --- /dev/null +++ b/consensus/consensus_safety_rules.go @@ -0,0 +1,73 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// SafetyRules enforces all consensus rules that guarantee safety. It produces +// votes for the given states or TimeoutState for the given ranks, only if all +// safety rules are satisfied. In particular, SafetyRules guarantees a +// foundational security theorem for HotStuff, which we utilize also outside of +// consensus (e.g. queuing pending states for execution, verification, sealing +// etc): +// +// THEOREM: For each rank, there can be at most 1 certified state. +// +// Implementations are generally *not* concurrency safe. +type SafetyRules[StateT models.Unique, VoteT models.Unique] interface { + // ProduceVote takes a state proposal and current rank, and decides whether to + // vote for the state. Voting is deterministic, i.e. voting for same proposal + // will always result in the same vote. + // Returns: + // * (vote, nil): On the _first_ state for the current rank that is safe to + // vote for. Subsequently, voter does _not_ vote for any _other_ state with + // the same (or lower) rank. SafetyRules internally caches and persists its + // latest vote. As long as the SafetyRules' internal state remains + // unchanged, ProduceVote will return its cached for identical inputs. + // * (nil, model.NoVoteError): If the safety module decides that it is not + // safe to vote for the given state. This is a sentinel error and + // _expected_ during normal operation. + // All other errors are unexpected and potential symptoms of uncovered edge + // cases or corrupted internal state (fatal). + ProduceVote( + proposal *models.SignedProposal[StateT, VoteT], + curRank uint64, + ) (*VoteT, error) + + // ProduceTimeout takes current rank, highest locally known QC and TC + // (optional, must be nil if and only if QC is for previous rank) and decides + // whether to produce timeout for current rank. + // Returns: + // * (timeout, nil): It is safe to timeout for current rank using newestQC + // and lastRankTC. + // * (nil, model.NoTimeoutError): If replica is not part of the authorized + // consensus committee (anymore) and therefore is not authorized to produce + // a valid timeout state. This sentinel error is _expected_ during normal + // operation, e.g. during the grace-period after Epoch switchover or after + // the replica self-ejected. + // All other errors are unexpected and potential symptoms of uncovered edge + // cases or corrupted internal state (fatal). + ProduceTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + lastRankTC models.TimeoutCertificate, + ) (*models.TimeoutState[VoteT], error) + + // SignOwnProposal takes an unsigned state proposal and produces a vote for + // it. Vote is a cryptographic commitment to the proposal. By adding the vote + // to an unsigned proposal, the caller constructs a signed state proposal. + // This method has to be used only by the leader, which must be the proposer + // of the state (or an exception is returned). + // Implementors must guarantee that: + // - vote on the proposal satisfies safety rules + // - maximum one proposal is signed per rank + // Returns: + // * (vote, nil): the passed unsigned proposal is a valid one, and it's safe + // to make a proposal. Subsequently, leader does _not_ produce any _other_ + // proposal with the same (or lower) rank. + // * (nil, model.NoVoteError): according to HotStuff's Safety Rules, it is + // not safe to sign the given proposal. This could happen because we have + // already proposed or timed out for the given rank. This is a sentinel + // error and _expected_ during normal operation. + // All other errors are unexpected and potential symptoms of uncovered edge + // cases or corrupted internal state (fatal). + SignOwnProposal(unsignedProposal *models.Proposal[StateT]) (*VoteT, error) +} diff --git a/consensus/consensus_signature.go b/consensus/consensus_signature.go new file mode 100644 index 0000000..d0d168f --- /dev/null +++ b/consensus/consensus_signature.go @@ -0,0 +1,161 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// WeightedSignatureAggregator aggregates signatures of the same signature +// scheme and the same message from different signers. The public keys and +// message are agreed upon upfront. It is also recommended to only aggregate +// signatures generated with keys representing equivalent security-bit level. +// Furthermore, a weight [unsigned int64] is assigned to each signer ID. The +// WeightedSignatureAggregator internally tracks the total weight of all +// collected signatures. Implementations must be concurrency safe. +type WeightedSignatureAggregator interface { + // Verify verifies the signature under the stored public keys and message. + // Expected errors during normal operations: + // - model.InvalidSignerError if signerID is invalid (not a consensus + // participant) + // - model.ErrInvalidSignature if signerID is valid but signature is + // cryptographically invalid + Verify(signerID models.Identity, sig []byte) error + + // TrustedAdd adds a signature to the internal set of signatures and adds the + // signer's weight to the total collected weight, iff the signature is _not_ a + // duplicate. The total weight of all collected signatures (excluding + // duplicates) is returned regardless of any returned error. + // Expected errors during normal operations: + // - model.InvalidSignerError if signerID is invalid (not a consensus + // participant) + // - model.DuplicatedSignerError if the signer has been already added + TrustedAdd(signerID models.Identity, sig []byte) ( + totalWeight uint64, + exception error, + ) + + // TotalWeight returns the total weight presented by the collected signatures. + TotalWeight() uint64 + + // Aggregate aggregates the signatures and returns the aggregated signature. + // The function performs a final verification and errors if the aggregated + // signature is invalid. This is required for the function safety since + // `TrustedAdd` allows adding invalid signatures. + // The function errors with: + // - model.InsufficientSignaturesError if no signatures have been added yet + // - model.InvalidSignatureIncludedError if: + // -- some signature(s), included via TrustedAdd, fail to deserialize + // (regardless of the aggregated public key) + // -- or all signatures deserialize correctly but some signature(s), + // included via TrustedAdd, are invalid (while aggregated public key is + // valid) + // - model.InvalidAggregatedKeyError if all signatures deserialize correctly + // but the signer's proving public keys sum up to an invalid key (BLS + // identity public key). Any aggregated signature would fail the + // cryptographic verification under the identity public key and therefore + // such signature is considered invalid. Such scenario can only happen if + // proving public keys of signers were forged to add up to the identity + // public key. Under the assumption that all proving key PoPs are valid, + // this error case can only happen if all signers are malicious and + // colluding. If there is at least one honest signer, there is a + // negligible probability that the aggregated key is identity. + // + // The function is thread-safe. + Aggregate() ([]models.WeightedIdentity, models.AggregatedSignature, error) +} + +// TimeoutSignatureAggregator aggregates timeout signatures for one particular +// rank. When instantiating a TimeoutSignatureAggregator, the following +// information is supplied: +// - The rank for which the aggregator collects timeouts. +// - For each replicas that is authorized to send a timeout at this particular +// rank: the node ID, public proving keys, and weight +// +// Timeouts for other ranks or from non-authorized replicas are rejected. +// In their TimeoutStates, replicas include a signature over the pair (rank, +// newestQCRank), where `rank` is the rank number the timeout is for and +// `newestQCRank` is the rank of the newest QC known to the replica. +// TimeoutSignatureAggregator collects these signatures, internally tracks the +// total weight of all collected signatures. Note that in general the signed +// messages are different, which makes the aggregation a comparatively expensive +// operation. Upon calling `Aggregate`, the TimeoutSignatureAggregator +// aggregates all valid signatures collected up to this point. The aggregate +// signature is guaranteed to be correct, as only valid signatures are accepted +// as inputs. +// TimeoutSignatureAggregator internally tracks the total weight of all +// collected signatures. Implementations must be concurrency safe. +type TimeoutSignatureAggregator interface { + // VerifyAndAdd verifies the signature under the stored public keys and adds + // the signature and the corresponding highest QC to the internal set. + // Internal set and collected weight is modified iff signature _is_ valid. + // The total weight of all collected signatures (excluding duplicates) is + // returned regardless of any returned error. + // Expected errors during normal operations: + // - model.InvalidSignerError if signerID is invalid (not a consensus + // participant) + // - model.DuplicatedSignerError if the signer has been already added + // - model.ErrInvalidSignature if signerID is valid but signature is + // cryptographically invalid + VerifyAndAdd( + signerID models.Identity, + sig []byte, + newestQCRank uint64, + ) (totalWeight uint64, exception error) + + // TotalWeight returns the total weight presented by the collected signatures. + TotalWeight() uint64 + + // Rank returns the rank that this instance is aggregating signatures for. + Rank() uint64 + + // Aggregate aggregates the signatures and returns with additional data. + // Aggregated signature will be returned as SigData of timeout certificate. + // Caller can be sure that resulting signature is valid. + // Expected errors during normal operations: + // - model.InsufficientSignaturesError if no signatures have been added yet + Aggregate() ( + signersInfo []TimeoutSignerInfo, + aggregatedSig models.AggregatedSignature, + exception error, + ) +} + +// TimeoutSignerInfo is a helper structure that stores the QC ranks that each +// signer contributed to a TC. Used as result of +// TimeoutSignatureAggregator.Aggregate() +type TimeoutSignerInfo struct { + NewestQCRank uint64 + Signer models.Identity +} + +// StateSignatureData is an intermediate struct for Packer to pack the +// aggregated signature data into raw bytes or unpack from raw bytes. +type StateSignatureData struct { + Signers []models.WeightedIdentity + Signature []byte +} + +// Packer packs aggregated signature data into raw bytes to be used in state +// header. +type Packer interface { + // Pack serializes the provided StateSignatureData into a precursor format of + // a QC. rank is the rank of the state that the aggregated signature is for. + // sig is the aggregated signature data. + // Expected error returns during normal operations: + // * none; all errors are symptoms of inconsistent input data or corrupted + // internal state. + Pack(rank uint64, sig *StateSignatureData) ( + signerIndices []byte, + sigData []byte, + err error, + ) + + // Unpack de-serializes the provided signature data. + // sig is the aggregated signature data + // It returns: + // - (sigData, nil) if successfully unpacked the signature data + // - (nil, model.InvalidFormatError) if failed to unpack the signature data + Unpack(signerIdentities []models.WeightedIdentity, sigData []byte) ( + *StateSignatureData, + error, + ) +} diff --git a/consensus/consensus_signer.go b/consensus/consensus_signer.go new file mode 100644 index 0000000..31b128e --- /dev/null +++ b/consensus/consensus_signer.go @@ -0,0 +1,39 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Signer is responsible for creating votes, proposals for a given state. +type Signer[StateT models.Unique, VoteT models.Unique] interface { + // CreateVote creates a vote for the given state. No error returns are + // expected during normal operations (incl. presence of byz. actors). + CreateVote(state *models.State[StateT]) (*VoteT, error) + + // CreateTimeout creates a timeout for given rank. No errors return are + // expected during normal operations(incl presence of byz. actors). + CreateTimeout( + curView uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, + ) (*models.TimeoutState[VoteT], error) +} + +type SignatureAggregator interface { + VerifySignatureMultiMessage( + publicKeys [][]byte, + signature []byte, + messages [][]byte, + context []byte, + ) bool + VerifySignatureRaw( + publicKey []byte, + signature []byte, + message []byte, + context []byte, + ) bool + Aggregate( + publicKeys [][]byte, + signatures [][]byte, + ) (models.AggregatedSignature, error) +} diff --git a/consensus/consensus_store.go b/consensus/consensus_store.go new file mode 100644 index 0000000..d3e2783 --- /dev/null +++ b/consensus/consensus_store.go @@ -0,0 +1,18 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// ConsensusStore defines the methods required for internal state that should +// persist between restarts of the consensus engine. +type ConsensusStore[VoteT models.Unique] interface { + ReadOnlyConsensusStore[VoteT] + PutConsensusState(state *models.ConsensusState[VoteT]) error + PutLivenessState(state *models.LivenessState) error +} + +// ReadOnlyConsensusStore defines the methods required for reading internal +// state persisted between restarts of the consensus engine. +type ReadOnlyConsensusStore[VoteT models.Unique] interface { + GetConsensusState() (*models.ConsensusState[VoteT], error) + GetLivenessState() (*models.LivenessState, error) +} diff --git a/consensus/consensus_sync.go b/consensus/consensus_sync.go new file mode 100644 index 0000000..9a09180 --- /dev/null +++ b/consensus/consensus_sync.go @@ -0,0 +1,20 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SyncProvider handles synchronization management +type SyncProvider[StateT models.Unique] interface { + // Performs synchronization to set internal state. Note that it is assumed + // that errors are transient and synchronization should be reattempted on + // failure. If some other process for synchronization is used and this should + // be bypassed, send nil on the error channel. Provided context may be + // canceled, should be used to halt long-running sync operations. + Synchronize( + ctx context.Context, + existing *StateT, + ) (<-chan *StateT, <-chan error) +} diff --git a/consensus/consensus_timeout.go b/consensus/consensus_timeout.go new file mode 100644 index 0000000..11cfb36 --- /dev/null +++ b/consensus/consensus_timeout.go @@ -0,0 +1,128 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregator verifies and aggregates timeout states to build timeout +// certificates [TCs]. When enough timeout states are collected, it builds a TC +// and sends it to the EventLoop TimeoutAggregator also detects protocol +// violation, including invalid timeouts, double timeout, etc and notifies a +// HotStuff consumer for slashing. +type TimeoutAggregator[VoteT models.Unique] interface { + Start(ctx context.Context) error + + // AddTimeout verifies and aggregates a timeout state. + // This method can be called concurrently, timeouts will be queued and + // processed asynchronously. + AddTimeout(timeoutState *models.TimeoutState[VoteT]) + + // PruneUpToRank deletes all `TimeoutCollector`s _below_ to the given rank, as + // well as related indices. We only retain and process `TimeoutCollector`s, + // whose rank is equal or larger than `lowestRetainedRank`. If + // `lowestRetainedRank` is smaller than the previous value, the previous value + // is kept and the method call is a NoOp. This value should be set to the + // latest active rank maintained by `Pacemaker`. + PruneUpToRank(lowestRetainedRank uint64) +} + +// TimeoutCollector collects all timeout states for a specified rank. On the +// happy path, it generates a TimeoutCertificate when enough timeouts have been +// collected. The TimeoutCollector is a higher-level structure that orchestrates +// deduplication, caching and processing of timeouts, delegating those tasks to +// underlying modules (such as TimeoutProcessor). Implementations of +// TimeoutCollector must be concurrency safe. +type TimeoutCollector[VoteT models.Unique] interface { + // AddTimeout adds a Timeout State to the collector. When TSs from + // strictly more than 1/3 of consensus participants (measured by weight) were + // collected, the callback for partial TC will be triggered. After collecting + // TSs from a supermajority, a TC will be created and passed to the EventLoop. + // Expected error returns during normal operations: + // * timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for + // incompatible rank + // All other exceptions are symptoms of potential state corruption. + AddTimeout(timeoutState *models.TimeoutState[VoteT]) error + + // Rank returns the rank that this instance is collecting timeouts for. + // This method is useful when adding the newly created timeout collector to + // timeout collectors map. + Rank() uint64 +} + +// TimeoutProcessor ingests Timeout States for a particular rank. It +// implements the algorithms for validating TSs, orchestrates their low-level +// aggregation and emits `OnPartialTcCreated` and `OnTcConstructedFromTimeouts` +// notifications. TimeoutProcessor cannot deduplicate TSs (this should be +// handled by the higher-level TimeoutCollector) and errors instead. Depending +// on their implementation, a TimeoutProcessor might drop timeouts or attempt to +// construct a TC. +type TimeoutProcessor[VoteT models.Unique] interface { + // Process performs processing of single timeout state. This function is safe + // to call from multiple goroutines. Expected error returns during normal + // operations: + // * timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for + // incompatible rank + // * models.InvalidTimeoutError - submitted invalid timeout(invalid structure + // or invalid signature) + // * models.DuplicatedSignerError if a timeout from the same signer was + // previously already added. It does _not necessarily_ imply that the + // timeout is invalid or the sender is equivocating. + // All other errors should be treated as exceptions. + Process(timeout *models.TimeoutState[VoteT]) error +} + +// TimeoutCollectorFactory performs creation of TimeoutCollector for a given +// rank +type TimeoutCollectorFactory[VoteT models.Unique] interface { + // Create is a factory method to generate a TimeoutCollector for a given rank + // Expected error returns during normal operations: + // * models.ErrRankUnknown no epoch containing the given rank is known + // All other errors should be treated as exceptions. + Create(rank uint64) (TimeoutCollector[VoteT], error) +} + +// TimeoutProcessorFactory performs creation of TimeoutProcessor for a given +// rank +type TimeoutProcessorFactory[VoteT models.Unique] interface { + // Create is a factory method to generate a TimeoutProcessor for a given rank + // Expected error returns during normal operations: + // * models.ErrRankUnknown no epoch containing the given rank is known + // All other errors should be treated as exceptions. + Create(rank uint64) (TimeoutProcessor[VoteT], error) +} + +// TimeoutCollectors encapsulates the functionality to generate, store and prune +// `TimeoutCollector` instances (one per rank). Its main purpose is to provide a +// higher-level API to `TimeoutAggregator` for managing and interacting with the +// rank-specific `TimeoutCollector` instances. Implementations are concurrency +// safe. +type TimeoutCollectors[VoteT models.Unique] interface { + // GetOrCreateCollector retrieves the TimeoutCollector for the specified + // rank or creates one if none exists. When creating a timeout collector, + // the rank is used to query the consensus committee for the respective + // Epoch the rank belongs to. + // It returns: + // - (collector, true, nil) if no collector can be found by the rank, and a + // new collector was created. + // - (collector, false, nil) if the collector can be found by the rank. + // - (nil, false, error) if running into any exception creating the timeout + // collector. + // Expected error returns during normal operations: + // * models.BelowPrunedThresholdError if rank is below the pruning threshold + // * models.ErrRankUnknown if rank is not yet pruned but no epoch containing + // the given rank is known + GetOrCreateCollector(rank uint64) ( + collector TimeoutCollector[VoteT], + created bool, + err error, + ) + + // PruneUpToRank prunes the timeout collectors with ranks _below_ the given + // value, i.e. we only retain and process timeout collectors, whose ranks are + // equal or larger than `lowestRetainedRank`. If `lowestRetainedRank` is + // smaller than the previous value, the previous value is kept and the method + // call is a NoOp. + PruneUpToRank(lowestRetainedRank uint64) +} diff --git a/consensus/consensus_tracing.go b/consensus/consensus_tracing.go new file mode 100644 index 0000000..4b2cc15 --- /dev/null +++ b/consensus/consensus_tracing.go @@ -0,0 +1,12 @@ +package consensus + +// TraceLogger defines a simple tracing interface +type TraceLogger interface { + Trace(message string) + Error(message string, err error) +} + +type nilTracer struct{} + +func (nilTracer) Trace(message string) {} +func (nilTracer) Error(message string, err error) {} diff --git a/consensus/consensus_validator.go b/consensus/consensus_validator.go new file mode 100644 index 0000000..7ac8e68 --- /dev/null +++ b/consensus/consensus_validator.go @@ -0,0 +1,32 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Validator provides functions to validate QuorumCertificate, proposals and +// votes. +type Validator[StateT models.Unique, VoteT models.Unique] interface { + + // ValidateQuorumCertificate checks the validity of a QuorumCertificate. + // During normal operations, the following error returns are expected: + // * models.InvalidQuorumCertificateError if the QuorumCertificate is invalid + ValidateQuorumCertificate(qc models.QuorumCertificate) error + + // ValidateTimeoutCertificate checks the validity of a TimeoutCertificate. + // During normal operations, the following error returns are expected: + // * models.InvalidTimeoutCertificateError if the TimeoutCertificate is + // invalid + ValidateTimeoutCertificate(tc models.TimeoutCertificate) error + + // ValidateProposal checks the validity of a proposal. + // During normal operations, the following error returns are expected: + // * models.InvalidProposalError if the state is invalid + ValidateProposal(proposal *models.SignedProposal[StateT, VoteT]) error + + // ValidateVote checks the validity of a vote. + // Returns the full entity for the voter. During normal operations, + // the following errors are expected: + // * models.InvalidVoteError for invalid votes + ValidateVote(vote *VoteT) (*models.WeightedIdentity, error) +} diff --git a/consensus/consensus_verifier.go b/consensus/consensus_verifier.go new file mode 100644 index 0000000..3ff3992 --- /dev/null +++ b/consensus/consensus_verifier.go @@ -0,0 +1,45 @@ +package consensus + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// Verifier is the component responsible for the cryptographic integrity of +// votes, proposals and QC's against the state they are signing. +type Verifier[VoteT models.Unique] interface { + // VerifyVote checks the cryptographic validity of a vote's `SigData` w.r.t. + // the rank and stateID. It is the responsibility of the calling code to + // ensure that `voter` is authorized to vote. + // Return values: + // * nil if `sigData` is cryptographically valid + // * models.InvalidFormatError if the signature has an incompatible format. + // * models.ErrInvalidSignature is the signature is invalid + // * unexpected errors should be treated as symptoms of bugs or uncovered + // edge cases in the logic (i.e. as fatal) + VerifyVote(vote *VoteT) error + + // VerifyQC checks the cryptographic validity of a QC's `SigData` w.r.t. the + // given rank and stateID. It is the responsibility of the calling code to + // ensure that all `signers` are authorized, without duplicates. + // Return values: + // * nil if `sigData` is cryptographically valid + // * models.InvalidFormatError if `sigData` has an incompatible format + // * models.InsufficientSignaturesError if `signers is empty. + // Depending on the order of checks in the higher-level logic this error + // might be an indicator of a external byzantine input or an internal bug. + // * models.ErrInvalidSignature if a signature is invalid + // * unexpected errors should be treated as symptoms of bugs or uncovered + // edge cases in the logic (i.e. as fatal) + VerifyQuorumCertificate(quorumCertificate models.QuorumCertificate) error + + // VerifyTimeoutCertificate checks cryptographic validity of the TC's + // `sigData` w.r.t. the given rank. It is the responsibility of the calling + // code to ensure that all `signers` are authorized, without duplicates. + // Return values: + // * nil if `sigData` is cryptographically valid + // * models.InsufficientSignaturesError if `signers is empty. + // * models.InvalidFormatError if `signers`/`highQCViews` have differing + // lengths + // * models.ErrInvalidSignature if a signature is invalid + // * unexpected errors should be treated as symptoms of bugs or uncovered + // edge cases in the logic (i.e. as fatal) + VerifyTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error +} diff --git a/consensus/consensus_voting.go b/consensus/consensus_voting.go new file mode 100644 index 0000000..e2cbba1 --- /dev/null +++ b/consensus/consensus_voting.go @@ -0,0 +1,63 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VotingProvider handles voting logic by deferring decisions, collection, and +// state finalization to an outside implementation. +type VotingProvider[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] interface { + // Sends a proposal for voting. + SendProposal(ctx context.Context, proposal *StateT) error + // SignVote signs a proposal, produces an output vote for aggregation and + // broadcasting. + SignVote( + ctx context.Context, + state *models.State[StateT], + ) (*VoteT, error) + // SignVote signs a proposal, produces an output vote for aggregation and + // broadcasting. + SignTimeoutVote( + ctx context.Context, + filter []byte, + currentRank uint64, + newestQuorumCertificateRank uint64, + ) (*VoteT, error) + FinalizeQuorumCertificate( + ctx context.Context, + state *models.State[StateT], + aggregatedSignature models.AggregatedSignature, + ) (models.QuorumCertificate, error) + // Produces a timeout certificate + FinalizeTimeout( + ctx context.Context, + filter []byte, + rank uint64, + latestQuorumCertificateRanks []uint64, + aggregatedSignature models.AggregatedSignature, + ) (models.TimeoutCertificate, error) + // Re-publishes a vote message, used to help lagging peers catch up. + SendVote(ctx context.Context, vote *VoteT) (PeerIDT, error) + // IsQuorum returns a response indicating whether or not quorum has been + // reached. + IsQuorum( + ctx context.Context, + proposalVotes map[models.Identity]*VoteT, + ) (bool, error) + // FinalizeVotes performs any folding of proposed state required from VoteT + // onto StateT, proposed states and votes matched by PeerIDT, returns + // finalized state, chosen proposer PeerIDT. + FinalizeVotes( + ctx context.Context, + proposals map[models.Identity]*StateT, + proposalVotes map[models.Identity]*VoteT, + ) (*StateT, PeerIDT, error) + // SendConfirmation sends confirmation of the finalized state. + SendConfirmation(ctx context.Context, finalized *StateT) error +} diff --git a/consensus/consensus_weight.go b/consensus/consensus_weight.go new file mode 100644 index 0000000..78965c7 --- /dev/null +++ b/consensus/consensus_weight.go @@ -0,0 +1,10 @@ +package consensus + +// WeightProvider defines the methods for handling weighted differentiation of +// voters, such as seniority, or stake. +type WeightProvider interface { + // GetWeightForBitmask returns the total weight of the given bitmask for the + // prover set under the filter. Bitmask is expected to be in ascending ring + // order. + GetWeightForBitmask(filter []byte, bitmask []byte) uint64 +} diff --git a/consensus/event_handler/event_handler.go b/consensus/event_handler/event_handler.go new file mode 100644 index 0000000..213bdb7 --- /dev/null +++ b/consensus/event_handler/event_handler.go @@ -0,0 +1,688 @@ +package eventhandler + +import ( + "context" + "errors" + "fmt" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// EventHandler is the main handler for individual events that trigger state +// transition. It exposes API to handle one event at a time synchronously. +// EventHandler is *not concurrency safe*. Please use the EventLoop to ensure +// that only a single go-routine executes the EventHandler's algorithms. +// EventHandler is implemented in event-driven way, it reacts to incoming events +// and performs certain actions. It doesn't perform any actions on its own. +// There are 3 main responsibilities of EventHandler, vote, propose, timeout. +// There are specific scenarios that lead to each of those actions. +// - create vote: voting logic is triggered by OnReceiveProposal, after +// receiving proposal we have all required information to create a valid +// vote. Compliance engine makes sure that we receive proposals, whose +// parents are known. Creating a vote can be triggered ONLY by receiving +// proposal. +// - create timeout: creating models.TimeoutState is triggered by +// OnLocalTimeout, after reaching deadline for current round. EventHandler +// gets notified about it and has to create a models.TimeoutState and +// broadcast it to other replicas. Creating a TO can be triggered by +// reaching round deadline or triggered as part of Bracha broadcast when +// superminority of replicas have contributed to TC creation and created a +// partial TC. +// - create a proposal: proposing logic is more complicated. Creating a +// proposal is triggered by the EventHandler receiving a QC or TC that +// induces a rank change to a rank where the replica is primary. As an edge +// case, the EventHandler can receive a QC or TC that triggers the rank +// change, but we can't create a proposal in case we are missing parent +// state the newest QC refers to. In case we already have the QC, but are +// still missing the respective parent, OnReceiveProposal can trigger the +// proposing logic as well, but only when receiving proposal for rank lower +// than active rank. To summarize, to make a valid proposal for rank N we +// need to have a QC or TC for N-1 and know the proposal with stateID +// NewestQC.Identifier. +// +// Not concurrency safe. +type EventHandler[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] struct { + tracer consensus.TraceLogger + paceMaker consensus.Pacemaker + stateProducer consensus.StateProducer[StateT, VoteT] + forks consensus.Forks[StateT] + store consensus.ConsensusStore[VoteT] + committee consensus.Replicas + safetyRules consensus.SafetyRules[StateT, VoteT] + notifier consensus.Consumer[StateT, VoteT] +} + +var _ consensus.EventHandler[*nilUnique, *nilUnique] = (*EventHandler[ + *nilUnique, *nilUnique, *nilUnique, *nilUnique, +])(nil) + +// NewEventHandler creates an EventHandler instance with initial components. +func NewEventHandler[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + paceMaker consensus.Pacemaker, + stateProducer consensus.StateProducer[StateT, VoteT], + forks consensus.Forks[StateT], + store consensus.ConsensusStore[VoteT], + committee consensus.Replicas, + safetyRules consensus.SafetyRules[StateT, VoteT], + notifier consensus.Consumer[StateT, VoteT], + tracer consensus.TraceLogger, +) (*EventHandler[StateT, VoteT, PeerIDT, CollectedT], error) { + e := &EventHandler[StateT, VoteT, PeerIDT, CollectedT]{ + paceMaker: paceMaker, + stateProducer: stateProducer, + forks: forks, + store: store, + safetyRules: safetyRules, + committee: committee, + notifier: notifier, + tracer: tracer, + } + return e, nil +} + +// OnReceiveQuorumCertificate processes a valid qc constructed by internal vote +// aggregator or discovered in TimeoutState. All inputs should be validated +// before feeding into this function. Assuming trusted data. No errors are +// expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnReceiveQuorumCertificate(qc models.QuorumCertificate) error { + curRank := e.paceMaker.CurrentRank() + + e.tracer.Trace("received QC") + e.notifier.OnReceiveQuorumCertificate(curRank, qc) + defer e.notifier.OnEventProcessed() + + newRankEvent, err := e.paceMaker.ReceiveQuorumCertificate(qc) + if err != nil { + return fmt.Errorf("could not process QC: %w", err) + } + if newRankEvent == nil { + e.tracer.Trace("QC didn't trigger rank change, nothing to do") + return nil + } + + // current rank has changed, go to new rank + e.tracer.Trace("QC triggered rank change, starting new rank now") + return e.proposeForNewRankIfPrimary() +} + +// OnReceiveTimeoutCertificate processes a valid tc constructed by internal timeout aggregator, +// discovered in TimeoutState or broadcast over the network. +// All inputs should be validated before feeding into this function. Assuming +// trusted data. No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnReceiveTimeoutCertificate(tc models.TimeoutCertificate) error { + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace("received TC") + e.notifier.OnReceiveTimeoutCertificate(curRank, tc) + defer e.notifier.OnEventProcessed() + + newRankEvent, err := e.paceMaker.ReceiveTimeoutCertificate(tc) + if err != nil { + return fmt.Errorf("could not process TC for rank %d: %w", tc.GetRank(), err) + } + if newRankEvent == nil { + e.tracer.Trace("TC didn't trigger rank change, nothing to do") + return nil + } + + // current rank has changed, go to new rank + e.tracer.Trace("TC triggered rank change, starting new rank now") + return e.proposeForNewRankIfPrimary() +} + +// OnReceiveProposal processes a state proposal received from another HotStuff +// consensus participant. +// All inputs should be validated before feeding into this function. Assuming +// trusted data. No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error { + state := proposal.State + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace("proposal received from compliance engine") + e.notifier.OnReceiveProposal(curRank, proposal) + defer e.notifier.OnEventProcessed() + + // ignore stale proposals + if (*state).Rank < e.forks.FinalizedRank() { + e.tracer.Trace("stale proposal") + return nil + } + + // store the state. + err := e.forks.AddValidatedState(proposal.State) + if err != nil { + return fmt.Errorf( + "cannot add proposal to forks (%x): %w", + state.Identifier, + err, + ) + } + + _, err = e.paceMaker.ReceiveQuorumCertificate( + proposal.State.ParentQuorumCertificate, + ) + if err != nil { + return fmt.Errorf( + "could not process QC for state %x: %w", + state.Identifier, + err, + ) + } + + _, err = e.paceMaker.ReceiveTimeoutCertificate( + proposal.PreviousRankTimeoutCertificate, + ) + if err != nil { + return fmt.Errorf( + "could not process TC for state %x: %w", + state.Identifier, + err, + ) + } + + // if the state is for the current rank, then try voting for this state + err = e.processStateForCurrentRank(proposal) + if err != nil { + return fmt.Errorf("failed processing current state: %w", err) + } + e.tracer.Trace("proposal processed from compliance engine") + + // nothing to do if this proposal is for current rank + if proposal.State.Rank == e.paceMaker.CurrentRank() { + return nil + } + + return e.proposeForNewRankIfPrimary() +} + +// TimeoutChannel returns the channel for subscribing the waiting timeout on +// receiving state or votes for the current rank. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) TimeoutChannel() <-chan time.Time { + return e.paceMaker.TimeoutCh() +} + +// OnLocalTimeout handles a local timeout event by creating a +// models.TimeoutState and broadcasting it. No errors are expected during normal +// operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnLocalTimeout() error { + curRank := e.paceMaker.CurrentRank() + e.tracer.Trace("timeout received from event loop") + e.notifier.OnLocalTimeout(curRank) + defer e.notifier.OnEventProcessed() + + err := e.broadcastTimeoutStateIfAuthorized() + if err != nil { + return fmt.Errorf( + "unexpected exception while processing timeout in rank %d: %w", + curRank, + err, + ) + } + return nil +} + +// OnPartialTcCreated handles notification produces by the internal timeout +// aggregator. If the notification is for the current rank, a corresponding +// models.TimeoutState is broadcast to the consensus committee. No errors are +// expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) OnPartialTimeoutCertificateCreated( + partialTC *consensus.PartialTimeoutCertificateCreated, +) error { + curRank := e.paceMaker.CurrentRank() + previousRankTimeoutCert := partialTC.PriorRankTimeoutCertificate + e.tracer.Trace("constructed partial TC") + + e.notifier.OnPartialTimeoutCertificate(curRank, partialTC) + defer e.notifier.OnEventProcessed() + + // process QC, this might trigger rank change + _, err := e.paceMaker.ReceiveQuorumCertificate( + partialTC.NewestQuorumCertificate, + ) + if err != nil { + return fmt.Errorf("could not process newest QC: %w", err) + } + + // process TC, this might trigger rank change + _, err = e.paceMaker.ReceiveTimeoutCertificate(previousRankTimeoutCert) + if err != nil { + return fmt.Errorf( + "could not process TC for rank %d: %w", + previousRankTimeoutCert.GetRank(), + err, + ) + } + + // NOTE: in other cases when we have observed a rank change we will trigger + // proposing logic, this is desired logic for handling proposal, QC and TC. + // However, observing a partial TC means that superminority have timed out and + // there was at least one honest replica in that set. Honest replicas will + // never vote after timing out for current rank meaning we won't be able to + // collect supermajority of votes for a proposal made after observing partial + // TC. + + // by definition, we are allowed to produce timeout state if we have received + // partial TC for current rank + if e.paceMaker.CurrentRank() != partialTC.Rank { + return nil + } + + e.tracer.Trace("partial TC generated for current rank, broadcasting timeout") + err = e.broadcastTimeoutStateIfAuthorized() + if err != nil { + return fmt.Errorf( + "unexpected exception while processing partial TC in rank %d: %w", + partialTC.Rank, + err, + ) + } + return nil +} + +// Start starts the event handler. No errors are expected during normal +// operation. CAUTION: EventHandler is not concurrency safe. The Start method +// must be executed by the same goroutine that also calls the other business +// logic methods, or concurrency safety has to be implemented externally. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) Start(ctx context.Context) error { + e.notifier.OnStart(e.paceMaker.CurrentRank()) + defer e.notifier.OnEventProcessed() + e.paceMaker.Start(ctx) + err := e.proposeForNewRankIfPrimary() + if err != nil { + return fmt.Errorf("could not start new rank: %w", err) + } + return nil +} + +// broadcastTimeoutStateIfAuthorized attempts to generate a +// models.TimeoutState, adds it to `timeoutAggregator` and broadcasts it to the +// consensus commettee. We check, whether this node, at the current rank, is +// part of the consensus committee. Otherwise, this method is functionally a +// no-op. For example, right after an epoch switchover a consensus node might +// still be online but not part of the _active_ consensus committee anymore. +// Consequently, it should not broadcast timeouts anymore. No errors are +// expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) broadcastTimeoutStateIfAuthorized() error { + curRank := e.paceMaker.CurrentRank() + newestQC := e.paceMaker.LatestQuorumCertificate() + previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate() + + if newestQC.GetRank()+1 == curRank { + // in case last rank has ended with QC and TC, make sure that only QC is + // included otherwise such timeout is invalid. This case is possible if TC + // has included QC with the same rank as the TC itself, meaning that + // newestQC.Rank == previousRankTimeoutCert.Rank + previousRankTimeoutCert = nil + } + + timeout, err := e.safetyRules.ProduceTimeout( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + if models.IsNoTimeoutError(err) { + e.tracer.Error( + "not generating timeout as this node is not part of the active committee", + err, + ) + return nil + } + return fmt.Errorf("could not produce timeout: %w", err) + } + + // raise a notification to broadcast timeout + e.notifier.OnOwnTimeout(timeout) + e.tracer.Trace("broadcast TimeoutState done") + + return nil +} + +// proposeForNewRankIfPrimary will only be called when we may be able to propose +// a state, after processing a new event. +// - after entering a new rank as a result of processing a QC or TC, then we +// may propose for the newly entered rank +// - after receiving a proposal (but not changing rank), if that proposal is +// referenced by our highest known QC, and the proposal was previously +// unknown, then we can propose a state in the current rank +// +// Enforced INVARIANTS: +// - There will at most be `OnOwnProposal` notification emitted for ranks +// where this node is the leader, and none if another node is the leader. +// This holds irrespective of restarts. Formally, this prevents proposal +// equivocation. +// +// It reads the current rank, and generates a proposal if we are the leader. +// No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) proposeForNewRankIfPrimary() error { + start := time.Now() // track the start time + curRank := e.paceMaker.CurrentRank() + currentLeader, err := e.committee.LeaderForRank(curRank) + if err != nil { + return fmt.Errorf( + "failed to determine primary for new rank %d: %w", + curRank, + err, + ) + } + finalizedRank := e.forks.FinalizedRank() + + e.notifier.OnCurrentRankDetails(curRank, finalizedRank, currentLeader) + + // check that I am the primary for this rank + if e.committee.Self() != currentLeader { + return nil + } + + // attempt to generate proposal: + newestQC := e.paceMaker.LatestQuorumCertificate() + previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate() + + _, found := e.forks.GetState(newestQC.GetSelector()) + if !found { + // we don't know anything about state referenced by our newest QC, in this + // case we can't create a valid proposal since we can't guarantee validity + // of state payload. + e.tracer.Trace("haven't synced the latest state yet; can't propose") + return nil + } + e.tracer.Trace("generating proposal as leader") + + // Sanity checks to make sure that resulting proposal is valid: + // In its proposal, the leader for rank N needs to present evidence that it + // has legitimately entered rank N. As evidence, we include a QC or TC for + // rank N-1, which should always be available as the PaceMaker advances to + // rank N only after observing a QC or TC from rank N-1. Moreover, QC and TC + // are always processed together. As EventHandler is strictly single-threaded + // without reentrancy, we must have a QC or TC for the prior rank (curRank-1). + // Failing one of these sanity checks is a symptom of state corruption or a + // severe implementation bug. + if newestQC.GetRank()+1 != curRank { + if previousRankTimeoutCert == nil { + return fmt.Errorf("possible state corruption, expected previousRankTimeoutCert to be not nil") + } + if previousRankTimeoutCert.GetRank()+1 != curRank { + return fmt.Errorf( + "possible state corruption, don't have QC(rank=%d) and TC(rank=%d) for previous rank(currentRank=%d)", + newestQC.GetRank(), + previousRankTimeoutCert.GetRank(), + curRank, + ) + } + } else { + // In case last rank has ended with QC and TC, make sure that only QC is + // included, otherwise such proposal is invalid. This case is possible if TC + // has included QC with the same rank as the TC itself, meaning that + // newestQC.Rank == previousRankTimeoutCert.Rank + previousRankTimeoutCert = nil + } + + // Construct Own SignedProposal + // CAUTION, design constraints: + // (i) We cannot process our own proposal within the `EventHandler` right + // away. + // (ii) We cannot add our own proposal to Forks here right away. + // (iii) Metrics for the PaceMaker/CruiseControl assume that the EventHandler + // is the only caller of `TargetPublicationTime`. Technically, + // `TargetPublicationTime` records the publication delay relative to + // its _latest_ call. + // + // To satisfy all constraints, we construct the proposal here and query + // (once!) its `TargetPublicationTime`. Though, we do _not_ process our own + // states right away and instead ingest them into the EventHandler the same + // way as proposals from other consensus participants. Specifically, on the + // path through the HotStuff state machine leading to state construction, the + // node's own proposal is largely ephemeral. The proposal is handed to the + // `MessageHub` (via the `OnOwnProposal` notification including the + // `TargetPublicationTime`). The `MessageHub` waits until + // `TargetPublicationTime` and only then broadcast the proposal and puts it + // into the EventLoop's queue for inbound states. This is exactly the same way + // as proposals from other nodes are ingested by the `EventHandler`, except + // that we are skipping the ComplianceEngine (assuming that our own proposals + // are protocol-compliant). + // + // Context: + // • On constraint (i): We want to support consensus committees only + // consisting of a *single* node. If the EventHandler internally processed + // the state right away via a direct message call, the call-stack would be + // ever-growing and the node would crash eventually (we experienced this + // with a very early HotStuff implementation). Specifically, if we wanted + // to process the state directly without taking a detour through the + // EventLoop's inbound queue, we would call `OnReceiveProposal` here. The + // function `OnReceiveProposal` would then end up calling + // `proposeForNewRankIfPrimary` (this function) to generate the next + // proposal, which again would result in calling `OnReceiveProposal` and so + // on so forth until the call stack or memory limit is reached and the node + // crashes. This is only a problem for consensus committees of size 1. + // • On constraint (ii): When adding a proposal to Forks, Forks emits a + // `StateIncorporatedEvent` notification, which is observed by Cruise + // Control and would change its state. However, note that Cruise Control + // is trying to estimate the point in time when _other_ nodes are observing + // the proposal. The time when we broadcast the proposal (i.e. + // `TargetPublicationTime`) is a reasonably good estimator, but *not* the + // time the proposer constructed the state (because there is potentially + // still a significant wait until `TargetPublicationTime`). + // + // The current approach is for a node to process its own proposals at the same + // time and through the same code path as proposals from other nodes. This + // satisfies constraints (i) and (ii) and generates very strong consistency, + // from a software design perspective. + // Just hypothetically, if we changed Cruise Control to be notified about + // own state proposals _only_ when they are broadcast (satisfying constraint + // (ii) without relying on the EventHandler), then we could add a proposal to + // Forks here right away. Nevertheless, the restriction remains that we cannot + // process that proposal right away within the EventHandler and instead need + // to put it into the EventLoop's inbound queue to support consensus + // committees of size 1. + stateProposal, err := e.stateProducer.MakeStateProposal( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + if models.IsNoVoteError(err) { + e.tracer.Error( + "aborting state proposal to prevent equivocation (likely re-entered proposal logic due to crash)", + err, + ) + return nil + } + return fmt.Errorf( + "can not make state proposal for curRank %v: %w", + curRank, + err, + ) + } + targetPublicationTime := e.paceMaker.TargetPublicationTime( + stateProposal.State.Rank, + start, + stateProposal.State.ParentQuorumCertificate.GetSelector(), + ) // determine target publication time + e.tracer.Trace("forwarding proposal to communicator for broadcasting") + + // emit notification with own proposal (also triggers broadcast) + e.notifier.OnOwnProposal(stateProposal, targetPublicationTime) + return nil +} + +// processStateForCurrentRank processes the state for the current rank. +// It is called AFTER the state has been stored or found in Forks +// It checks whether to vote for this state. +// No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) processStateForCurrentRank( + proposal *models.SignedProposal[StateT, VoteT], +) error { + // sanity check that state is really for the current rank: + curRank := e.paceMaker.CurrentRank() + state := proposal.State + if state.Rank != curRank { + // ignore outdated proposals in case we have moved forward + return nil + } + // leader (node ID) for next rank + nextLeader, err := e.committee.LeaderForRank(curRank + 1) + if errors.Is(err, models.ErrRankUnknown) { + // We are attempting process a state in an unknown rank + // This should never happen, because: + // * the compliance layer ensures proposals are passed to the event loop + // strictly after their parent + // * the protocol state ensures that, before incorporating the first state + // of an rank R, either R is known or we have triggered fallback mode - in + // either case the current rank is known + return fmt.Errorf("attempting to process a state for unknown rank") + } + if err != nil { + return fmt.Errorf( + "failed to determine primary for next rank %d: %w", + curRank+1, + err, + ) + } + + // safetyRules performs all the checks to decide whether to vote for this + // state or not. + err = e.ownVote(proposal, curRank, nextLeader) + if err != nil { + return fmt.Errorf("unexpected error in voting logic: %w", err) + } + + return nil +} + +// ownVote generates and forwards the own vote, if we decide to vote. +// Any errors are potential symptoms of uncovered edge cases or corrupted +// internal state (fatal). No errors are expected during normal operation. +func (e *EventHandler[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) ownVote( + proposal *models.SignedProposal[StateT, VoteT], + curRank uint64, + nextLeader models.Identity, +) error { + _, found := e.forks.GetState( + proposal.State.ParentQuorumCertificate.GetSelector(), + ) + if !found { + // we don't have parent for this proposal, we can't vote since we can't + // guarantee validity of proposals payload. Strictly speaking this shouldn't + // ever happen because compliance engine makes sure that we receive + // proposals with valid parents. + return fmt.Errorf( + "won't vote for proposal, no parent state for this proposal", + ) + } + + // safetyRules performs all the checks to decide whether to vote for this + // state or not. + ownVote, err := e.safetyRules.ProduceVote(proposal, curRank) + if err != nil { + if !models.IsNoVoteError(err) { + // unknown error, exit the event loop + return fmt.Errorf("could not produce vote: %w", err) + } + e.tracer.Trace("should not vote for this state") + return nil + } + + e.tracer.Trace("forwarding vote to compliance engine") + e.notifier.OnOwnVote(ownVote, nextLeader) + return nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/event_loop/event_loop.go b/consensus/event_loop/event_loop.go new file mode 100644 index 0000000..8beafb9 --- /dev/null +++ b/consensus/event_loop/event_loop.go @@ -0,0 +1,346 @@ +package eventloop + +import ( + "context" + "fmt" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/tracker" +) + +// queuedProposal is a helper structure that is used to transmit proposal in +// channel it contains an attached insertionTime that is used to measure how +// long we have waited between queening proposal and actually processing by +// `EventHandler`. +type queuedProposal[StateT models.Unique, VoteT models.Unique] struct { + proposal *models.SignedProposal[StateT, VoteT] + insertionTime time.Time +} + +// EventLoop buffers all incoming events to the hotstuff EventHandler, and feeds +// EventHandler one event at a time. +type EventLoop[StateT models.Unique, VoteT models.Unique] struct { + ctx context.Context + eventHandler consensus.EventHandler[StateT, VoteT] + proposals chan queuedProposal[StateT, VoteT] + newestSubmittedTc *tracker.NewestTCTracker + newestSubmittedQc *tracker.NewestQCTracker + newestSubmittedPartialTc *tracker.NewestPartialTcTracker + tcSubmittedNotifier chan struct{} + qcSubmittedNotifier chan struct{} + partialTcCreatedNotifier chan struct{} + startTime time.Time + tracer consensus.TraceLogger +} + +var _ consensus.EventLoop[*nilUnique, *nilUnique] = (*EventLoop[*nilUnique, *nilUnique])(nil) + +// NewEventLoop creates an instance of EventLoop. +func NewEventLoop[StateT models.Unique, VoteT models.Unique]( + tracer consensus.TraceLogger, + eventHandler consensus.EventHandler[StateT, VoteT], + startTime time.Time, +) (*EventLoop[StateT, VoteT], error) { + // we will use a buffered channel to avoid blocking of caller + // we can't afford to drop messages since it undermines liveness, but we also + // want to avoid blocking of compliance engine. We assume that we should be + // able to process proposals faster than compliance engine feeds them, worst + // case we will fill the buffer and state compliance engine worker but that + // should happen only if compliance engine receives large number of states in + // short period of time(when catching up for instance). + proposals := make(chan queuedProposal[StateT, VoteT], 1000) + + el := &EventLoop[StateT, VoteT]{ + tracer: tracer, + eventHandler: eventHandler, + proposals: proposals, + tcSubmittedNotifier: make(chan struct{}, 1), + qcSubmittedNotifier: make(chan struct{}, 1), + partialTcCreatedNotifier: make(chan struct{}, 1), + newestSubmittedTc: tracker.NewNewestTCTracker(), + newestSubmittedQc: tracker.NewNewestQCTracker(), + newestSubmittedPartialTc: tracker.NewNewestPartialTcTracker(), + startTime: startTime, + } + + return el, nil +} + +func (el *EventLoop[StateT, VoteT]) Start(ctx context.Context) error { + el.ctx = ctx + + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Until(el.startTime)): + el.tracer.Trace("starting event loop") + err := el.loop(ctx) + if err != nil { + el.tracer.Error("irrecoverable event loop error", err) + return err + } + } + return nil +} + +// loop executes the core HotStuff logic in a single thread. It picks inputs +// from the various inbound channels and executes the EventHandler's respective +// method for processing this input. During normal operations, the EventHandler +// is not expected to return any errors, as all inputs are assumed to be fully +// validated (or produced by trusted components within the node). Therefore, +// any error is a symptom of state corruption, bugs or violation of API +// contracts. In all cases, continuing operations is not an option, i.e. we exit +// the event loop and return an exception. +func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error { + err := el.eventHandler.Start(ctx) + if err != nil { + return fmt.Errorf("could not start event handler: %w", err) + } + + shutdownSignaled := ctx.Done() + timeoutCertificates := el.tcSubmittedNotifier + quorumCertificates := el.qcSubmittedNotifier + partialTCs := el.partialTcCreatedNotifier + + for { + // Giving timeout events the priority to be processed first. + // This is to prevent attacks from malicious nodes that attempt + // to block honest nodes' pacemaker from progressing by sending + // other events. + timeoutChannel := el.eventHandler.TimeoutChannel() + + // the first select makes sure we process timeouts with priority + select { + + // if we receive the shutdown signal, exit the loop + case <-shutdownSignaled: + return nil + + // processing timeout or partial TC event are top priority since + // they allow node to contribute to TC aggregation when replicas can't + // make progress on happy path + case <-timeoutChannel: + err = el.eventHandler.OnLocalTimeout() + if err != nil { + return fmt.Errorf("could not process timeout: %w", err) + } + + // At this point, we have received and processed an event from the timeout + // channel. A timeout also means that we have made progress. A new timeout + // will have been started and el.eventHandler.TimeoutChannel() will be a + // NEW channel (for the just-started timeout). Very important to start the + // for loop from the beginning, to continue the with the new timeout + // channel! + continue + + case <-partialTCs: + err = el.eventHandler.OnPartialTimeoutCertificateCreated( + el.newestSubmittedPartialTc.NewestPartialTc(), + ) + if err != nil { + return fmt.Errorf("could not process partial created TC event: %w", err) + } + + // At this point, we have received and processed partial TC event, it + // could have resulted in several scenarios: + // 1. a rank change with potential voting or proposal creation + // 2. a created and broadcast timeout state + // 3. QC and TC didn't result in rank change and no timeout was created + // since we have already timed out or the partial TC was created for rank + // different from current one. + continue + + default: + // fall through to non-priority events + } + + // select for state headers/QCs here + select { + + // same as before + case <-shutdownSignaled: + return nil + + // same as before + case <-timeoutChannel: + err = el.eventHandler.OnLocalTimeout() + if err != nil { + return fmt.Errorf("could not process timeout: %w", err) + } + + // if we have a new proposal, process it + case queuedItem := <-el.proposals: + proposal := queuedItem.proposal + err = el.eventHandler.OnReceiveProposal(proposal) + if err != nil { + return fmt.Errorf( + "could not process proposal %v: %w", + proposal.State.Identifier, + err, + ) + } + + el.tracer.Trace("state proposal has been processed successfully") + + // if we have a new QC, process it + case <-quorumCertificates: + err = el.eventHandler.OnReceiveQuorumCertificate( + *el.newestSubmittedQc.NewestQC(), + ) + if err != nil { + return fmt.Errorf("could not process QC: %w", err) + } + + // if we have a new TC, process it + case <-timeoutCertificates: + err = el.eventHandler.OnReceiveTimeoutCertificate( + *el.newestSubmittedTc.NewestTC(), + ) + if err != nil { + return fmt.Errorf("could not process TC: %w", err) + } + + case <-partialTCs: + err = el.eventHandler.OnPartialTimeoutCertificateCreated( + el.newestSubmittedPartialTc.NewestPartialTc(), + ) + if err != nil { + return fmt.Errorf("could no process partial created TC event: %w", err) + } + } + } +} + +// SubmitProposal pushes the received state to the proposals channel +func (el *EventLoop[StateT, VoteT]) SubmitProposal( + proposal *models.SignedProposal[StateT, VoteT], +) { + queueItem := queuedProposal[StateT, VoteT]{ + proposal: proposal, + insertionTime: time.Now(), + } + select { + case el.proposals <- queueItem: + case <-el.ctx.Done(): + return + } +} + +// onTrustedQC pushes the received QC (which MUST be validated) to the +// quorumCertificates channel +func (el *EventLoop[StateT, VoteT]) onTrustedQC(qc *models.QuorumCertificate) { + if el.newestSubmittedQc.Track(qc) { + el.qcSubmittedNotifier <- struct{}{} + } +} + +// onTrustedTC pushes the received TC (which MUST be validated) to the +// timeoutCertificates channel +func (el *EventLoop[StateT, VoteT]) onTrustedTC(tc *models.TimeoutCertificate) { + if el.newestSubmittedTc.Track(tc) { + el.tcSubmittedNotifier <- struct{}{} + } else { + qc := (*tc).GetLatestQuorumCert() + if el.newestSubmittedQc.Track(&qc) { + el.qcSubmittedNotifier <- struct{}{} + } + } +} + +// OnTimeoutCertificateConstructedFromTimeouts pushes the received TC to the +// timeoutCertificates channel +func (el *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts( + tc models.TimeoutCertificate, +) { + el.onTrustedTC(&tc) +} + +// OnPartialTimeoutCertificateCreated created a consensus.PartialTcCreated +// payload and pushes it into partialTcCreated buffered channel for further +// processing by EventHandler. Since we use buffered channel this function can +// block if buffer is full. +func (el *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) { + event := &consensus.PartialTimeoutCertificateCreated{ + Rank: rank, + NewestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + } + if el.newestSubmittedPartialTc.Track(event) { + el.partialTcCreatedNotifier <- struct{}{} + } +} + +// OnNewQuorumCertificateDiscovered pushes already validated QCs that were +// submitted from TimeoutAggregator to the event handler +func (el *EventLoop[StateT, VoteT]) OnNewQuorumCertificateDiscovered( + qc models.QuorumCertificate, +) { + el.onTrustedQC(&qc) +} + +// OnNewTimeoutCertificateDiscovered pushes already validated TCs that were +// submitted from TimeoutAggregator to the event handler +func (el *EventLoop[StateT, VoteT]) OnNewTimeoutCertificateDiscovered( + tc models.TimeoutCertificate, +) { + el.onTrustedTC(&tc) +} + +// OnQuorumCertificateConstructedFromVotes implements +// consensus.VoteCollectorConsumer and pushes received qc into processing +// pipeline. +func (el *EventLoop[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes( + qc models.QuorumCertificate, +) { + el.onTrustedQC(&qc) +} + +// OnTimeoutProcessed implements consensus.TimeoutCollectorConsumer and is no-op +func (el *EventLoop[StateT, VoteT]) OnTimeoutProcessed( + timeout *models.TimeoutState[VoteT], +) { +} + +// OnVoteProcessed implements consensus.VoteCollectorConsumer and is no-op +func (el *EventLoop[StateT, VoteT]) OnVoteProcessed(vote *VoteT) {} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/forest/leveled_forest.go b/consensus/forest/leveled_forest.go new file mode 100644 index 0000000..05c726a --- /dev/null +++ b/consensus/forest/leveled_forest.go @@ -0,0 +1,394 @@ +package forest + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LevelledForest contains multiple trees (which is a potentially disconnected +// planar graph). Each vertex in the graph has a level and a hash. A vertex can +// only have one parent, which must have strictly smaller level. A vertex can +// have multiple children, all with strictly larger level. +// A LevelledForest provides the ability to prune all vertices up to a specific +// level. A tree whose root is below the pruning threshold might decompose into +// multiple disconnected subtrees as a result of pruning. +// By design, the LevelledForest does _not_ touch the parent information for +// vertices that are on the lowest retained level. Thereby, it is possible to +// initialize the LevelledForest with a root vertex at the lowest retained +// level, without this root needing to have a parent. Furthermore, the root +// vertex can be at level 0 and in absence of a parent still satisfy the +// condition that any parent must be of lower level (mathematical principle of +// acuous truth) without the implementation needing to worry about unsigned +// integer underflow. +// +// LevelledForest is NOT safe for concurrent use by multiple goroutines. +type LevelledForest struct { + vertices VertexSet + verticesAtLevel map[uint64]VertexList + size uint64 + LowestLevel uint64 +} + +type VertexList []*vertexContainer +type VertexSet map[models.Identity]*vertexContainer + +// vertexContainer holds information about a tree vertex. Internally, we +// distinguish between +// - FULL container: has non-nil value for vertex. +// Used for vertices, which have been added to the tree. +// - EMPTY container: has NIL value for vertex. +// Used for vertices, which have NOT been added to the tree, but are +// referenced by vertices in the tree. An empty container is converted to a +// full container when the respective vertex is added to the tree +type vertexContainer struct { + id models.Identity + level uint64 + children VertexList + + // the following are only set if the state is actually known + vertex Vertex +} + +// NewLevelledForest initializes a LevelledForest +func NewLevelledForest(lowestLevel uint64) *LevelledForest { + return &LevelledForest{ + vertices: make(VertexSet), + verticesAtLevel: make(map[uint64]VertexList), + LowestLevel: lowestLevel, + } +} + +// PruneUpToLevel prunes all states UP TO but NOT INCLUDING `level`. +func (f *LevelledForest) PruneUpToLevel(level uint64) error { + if level < f.LowestLevel { + return fmt.Errorf( + "new lowest level %d cannot be smaller than previous last retained level %d", + level, + f.LowestLevel, + ) + } + if len(f.vertices) == 0 { + f.LowestLevel = level + return nil + } + + elementsPruned := 0 + + // to optimize the pruning large level-ranges, we compare: + // * the number of levels for which we have stored vertex containers: + // len(f.verticesAtLevel) + // * the number of levels that need to be pruned: level-f.LowestLevel + // We iterate over the dimension which is smaller. + if uint64(len(f.verticesAtLevel)) < level-f.LowestLevel { + for l, vertices := range f.verticesAtLevel { + if l < level { + for _, v := range vertices { + if !f.isEmptyContainer(v) { + elementsPruned++ + } + delete(f.vertices, v.id) + } + delete(f.verticesAtLevel, l) + } + } + } else { + for l := f.LowestLevel; l < level; l++ { + verticesAtLevel := f.verticesAtLevel[l] + for _, v := range verticesAtLevel { + if !f.isEmptyContainer(v) { + elementsPruned++ + } + delete(f.vertices, v.id) + } + delete(f.verticesAtLevel, l) + + } + } + f.LowestLevel = level + f.size -= uint64(elementsPruned) + return nil +} + +// HasVertex returns true iff full vertex exists. +func (f *LevelledForest) HasVertex(id models.Identity) bool { + container, exists := f.vertices[id] + return exists && !f.isEmptyContainer(container) +} + +// isEmptyContainer returns true iff vertexContainer container is empty, i.e. +// full vertex itself has not been added +func (f *LevelledForest) isEmptyContainer( + vertexContainer *vertexContainer, +) bool { + return vertexContainer.vertex == nil +} + +// GetVertex returns (, true) if the vertex with `id` and `level` +// was found (nil, false) if full vertex is unknown +func (f *LevelledForest) GetVertex(id models.Identity) (Vertex, bool) { + container, exists := f.vertices[id] + if !exists || f.isEmptyContainer(container) { + return nil, false + } + return container.vertex, true +} + +// GetSize returns the total number of vertices above the lowest pruned level. +// Note this call is not concurrent-safe, caller is responsible to ensure +// concurrency safety. +func (f *LevelledForest) GetSize() uint64 { + return f.size +} + +// GetChildren returns a VertexIterator to iterate over the children +// An empty VertexIterator is returned, if no vertices are known whose parent is +// `id`. +func (f *LevelledForest) GetChildren(id models.Identity) VertexIterator { + // if vertex does not exist, container will be nil + if container, ok := f.vertices[id]; ok { + return newVertexIterator(container.children) + } + return newVertexIterator(nil) // VertexIterator gracefully handles nil slices +} + +// GetNumberOfChildren returns number of children of given vertex +func (f *LevelledForest) GetNumberOfChildren(id models.Identity) int { + // if vertex does not exist, container is the default zero value for + // vertexContainer, which contains a nil-slice for its children + container := f.vertices[id] + num := 0 + for _, child := range container.children { + if child.vertex != nil { + num++ + } + } + return num +} + +// GetVerticesAtLevel returns a VertexIterator to iterate over the Vertices at +// the specified level. An empty VertexIterator is returned, if no vertices are +// known at the specified level. If `level` is already pruned, an empty +// VertexIterator is returned. +func (f *LevelledForest) GetVerticesAtLevel(level uint64) VertexIterator { + return newVertexIterator(f.verticesAtLevel[level]) +} + +// GetNumberOfVerticesAtLevel returns the number of full vertices at given +// level. A full vertex is a vertex that was explicitly added to the forest. In +// contrast, an empty vertex container represents a vertex that is _referenced_ +// as parent by one or more full vertices, but has not been added itself to the +// forest. We only count vertices that have been explicitly added to the forest +// and not yet pruned. (In comparision, we do _not_ count vertices that are +// _referenced_ as parent by vertices, but have not been added themselves). +func (f *LevelledForest) GetNumberOfVerticesAtLevel(level uint64) int { + num := 0 + for _, container := range f.verticesAtLevel[level] { + if !f.isEmptyContainer(container) { + num++ + } + } + return num +} + +// AddVertex adds vertex to forest if vertex is within non-pruned levels +// Handles repeated addition of same vertex (keeps first added vertex). +// If vertex is at or below pruning level: method is NoOp. +// UNVALIDATED: +// requires that vertex would pass validity check LevelledForest.VerifyVertex(vertex). +func (f *LevelledForest) AddVertex(vertex Vertex) { + if vertex.Level() < f.LowestLevel { + return + } + container := f.getOrCreateVertexContainer(vertex.VertexID(), vertex.Level()) + if !f.isEmptyContainer(container) { // the vertex was already stored + return + } + // container is empty, i.e. full vertex is new and should be stored in container + container.vertex = vertex // add vertex to container + f.registerWithParent(container) + f.size += 1 +} + +// registerWithParent retrieves the parent and registers the given vertex as a +// child. For a state, whose level equal to the pruning threshold, we do not +// inspect the parent at all. Thereby, this implementation can gracefully handle +// the corner case where the tree has a defined end vertex (distinct root). This +// is commonly the case in statechain (genesis, or spork root state). +// Mathematically, this means that this library can also represent bounded +// trees. +func (f *LevelledForest) registerWithParent(vertexContainer *vertexContainer) { + // caution, necessary for handling bounded trees: + // For root vertex (genesis state) the rank is _exactly_ at LowestLevel. For + // these states, a parent does not exist. In the implementation, we + // deliberately do not call the `Parent()` method, as its output is + // conceptually undefined. Thereby, we can gracefully handle the corner case + // of + // vertex.level = vertex.Parent().Level = LowestLevel = 0 + if vertexContainer.level <= f.LowestLevel { // check (a) + return + } + + _, parentView := vertexContainer.vertex.Parent() + if parentView < f.LowestLevel { + return + } + parentContainer := f.getOrCreateVertexContainer( + vertexContainer.vertex.Parent(), + ) + parentContainer.children = append(parentContainer.children, vertexContainer) +} + +// getOrCreateVertexContainer returns the vertexContainer if there exists one +// or creates a new vertexContainer and adds it to the internal data structures. +// (i.e. there exists an empty or full container with the same id but different +// level). +func (f *LevelledForest) getOrCreateVertexContainer( + id models.Identity, + level uint64, +) *vertexContainer { + container, exists := f.vertices[id] + if !exists { + container = &vertexContainer{ + id: id, + level: level, + } + f.vertices[container.id] = container + vertices := f.verticesAtLevel[container.level] + f.verticesAtLevel[container.level] = append(vertices, container) + } + return container +} + +// VerifyVertex verifies that adding vertex `v` would yield a valid Levelled +// Forest. Specifically, we verify that _all_ of the following conditions are +// satisfied: +// +// 1. `v.Level()` must be strictly larger than the level that `v` reports +// for its parent (maintains an acyclic graph). +// +// 2. If a vertex with the same ID as `v.VertexID()` exists in the graph or is +// referenced by another vertex within the graph, the level must be +// identical. (In other words, we don't have vertices with the same ID but +// different level) +// +// 3. Let `ParentLevel`, `ParentID` denote the level, ID that `v` reports for +// its parent. If a vertex with `ParentID` exists (or is referenced by other +// vertices as their parent), we require that the respective level is +// identical to `ParentLevel`. +// +// Notes: +// - If `v.Level()` has already been pruned, adding it to the forest is a +// NoOp. Hence, any vertex with level below the pruning threshold +// automatically passes. +// - By design, the LevelledForest does _not_ touch the parent information for +// vertices that are on the lowest retained level. Thereby, it is possible +// to initialize the LevelledForest with a root vertex at the lowest +// retained level, without this root needing to have a parent. Furthermore, +// the root vertex can be at level 0 and in absence of a parent still +// satisfy the condition that any parent must be of lower level +// (mathematical principle of vacuous truth) without the implementation +// needing to worry about unsigned integer underflow. +// +// Error returns: +// - InvalidVertexError if the input vertex is invalid for insertion to the +// forest. +func (f *LevelledForest) VerifyVertex(v Vertex) error { + if v.Level() < f.LowestLevel { + return nil + } + + storedContainer, haveVertexContainer := f.vertices[v.VertexID()] + if !haveVertexContainer { // have no vertex with same id stored + // the only thing remaining to check is the parent information + return f.ensureConsistentParent(v) + } + + // Found a vertex container, i.e. `v` already exists, or it is referenced by + // some other vertex. In all cases, `v.Level()` should match the + // vertexContainer's information + if v.Level() != storedContainer.level { + return NewInvalidVertexErrorf( + v, + "level conflicts with stored vertex with same id (%d!=%d)", + v.Level(), + storedContainer.level, + ) + } + + // vertex container is empty, i.e. `v` is referenced by some other vertex as + // its parent: + if f.isEmptyContainer(storedContainer) { + // the only thing remaining to check is the parent information + return f.ensureConsistentParent(v) + } + + // vertex container holds a vertex with the same ID as `v`: + // The parent information from vertexContainer has already been checked for + // consistency. So we simply compare with the existing vertex for + // inconsistencies + + // the vertex is at or below the lowest retained level, so we can't check the + // parent (it's pruned) + if v.Level() == f.LowestLevel { + return nil + } + + newParentId, newParentLevel := v.Parent() + storedParentId, storedParentLevel := storedContainer.vertex.Parent() + if newParentId != storedParentId { + return NewInvalidVertexErrorf( + v, + "parent ID conflicts with stored parent (%x!=%x)", + newParentId, + storedParentId, + ) + } + if newParentLevel != storedParentLevel { + return NewInvalidVertexErrorf( + v, + "parent level conflicts with stored parent (%d!=%d)", + newParentLevel, + storedParentLevel, + ) + } + // all _relevant_ fields identical + return nil +} + +// ensureConsistentParent verifies that vertex.Parent() is consistent with +// current forest. +// Returns InvalidVertexError if: +// * there is a parent with the same ID but different level; +// * the parent's level is _not_ smaller than the vertex's level +func (f *LevelledForest) ensureConsistentParent(vertex Vertex) error { + if vertex.Level() <= f.LowestLevel { + // the vertex is at or below the lowest retained level, so we can't check + // the parent (it's pruned) + return nil + } + + // verify parent + parentID, parentLevel := vertex.Parent() + if !(vertex.Level() > parentLevel) { + return NewInvalidVertexErrorf( + vertex, + "vertex parent level (%d) must be smaller than proposed vertex level (%d)", + parentLevel, + vertex.Level(), + ) + } + storedParent, haveParentStored := f.GetVertex(parentID) + if !haveParentStored { + return nil + } + if storedParent.Level() != parentLevel { + return NewInvalidVertexErrorf( + vertex, + "parent level conflicts with stored parent (%d!=%d)", + parentLevel, + storedParent.Level(), + ) + } + return nil +} diff --git a/consensus/forest/vertex.go b/consensus/forest/vertex.go new file mode 100644 index 0000000..feccb90 --- /dev/null +++ b/consensus/forest/vertex.go @@ -0,0 +1,103 @@ +package forest + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type Vertex interface { + // VertexID returns the vertex's ID (in most cases its hash) + VertexID() models.Identity + // Level returns the vertex's level + Level() uint64 + // Parent returns the parent's (level, ID) + Parent() (models.Identity, uint64) +} + +// VertexToString returns a string representation of the vertex. +func VertexToString(v Vertex) string { + parentID, parentLevel := v.Parent() + return fmt.Sprintf( + "", + v.VertexID(), + v.Level(), + parentID, + parentLevel, + ) +} + +// VertexIterator is a stateful iterator for VertexList. +// Internally operates directly on the Vertex Containers +// It has one-element look ahead for skipping empty vertex containers. +type VertexIterator struct { + data VertexList + idx int + next Vertex +} + +func (it *VertexIterator) preLoad() { + for it.idx < len(it.data) { + v := it.data[it.idx].vertex + it.idx++ + if v != nil { + it.next = v + return + } + } + it.next = nil +} + +// NextVertex returns the next Vertex or nil if there is none +func (it *VertexIterator) NextVertex() Vertex { + res := it.next + it.preLoad() + return res +} + +// HasNext returns true if and only if there is a next Vertex +func (it *VertexIterator) HasNext() bool { + return it.next != nil +} + +func newVertexIterator(vertexList VertexList) VertexIterator { + it := VertexIterator{ + data: vertexList, + } + it.preLoad() + return it +} + +// InvalidVertexError indicates that a proposed vertex is invalid for insertion +// to the forest. +type InvalidVertexError struct { + // Vertex is the invalid vertex + Vertex Vertex + // msg provides additional context + msg string +} + +func (err InvalidVertexError) Error() string { + return fmt.Sprintf( + "invalid vertex %s: %s", + VertexToString(err.Vertex), + err.msg, + ) +} + +func IsInvalidVertexError(err error) bool { + var target InvalidVertexError + return errors.As(err, &target) +} + +func NewInvalidVertexErrorf( + vertex Vertex, + msg string, + args ...interface{}, +) InvalidVertexError { + return InvalidVertexError{ + Vertex: vertex, + msg: fmt.Sprintf(msg, args...), + } +} diff --git a/consensus/forks/forks.go b/consensus/forks/forks.go new file mode 100644 index 0000000..958758c --- /dev/null +++ b/consensus/forks/forks.go @@ -0,0 +1,688 @@ +package forks + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/forest" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Forks enforces structural validity of the consensus state and implements +// finalization rules as defined in Jolteon consensus +// https://arxiv.org/abs/2106.10362 The same approach has later been adopted by +// the Diem team resulting in DiemBFT v4: +// https://developers.diem.com/papers/diem-consensus-state-machine-replication-in-the-diem-blockchain/2021-08-17.pdf +// Forks is NOT safe for concurrent use by multiple goroutines. +type Forks[StateT models.Unique, VoteT models.Unique] struct { + finalizationCallback consensus.Finalizer + notifier consensus.FollowerConsumer[StateT, VoteT] + forest forest.LevelledForest + trustedRoot *models.CertifiedState[StateT] + + // finalityProof holds the latest finalized state including the certified + // child as proof of finality. CAUTION: is nil, when Forks has not yet + // finalized any states beyond the finalized root state it was initialized + // with + finalityProof *consensus.FinalityProof[StateT] +} + +var _ consensus.Forks[*nilUnique] = (*Forks[*nilUnique, *nilUnique])(nil) + +func NewForks[StateT models.Unique, VoteT models.Unique]( + trustedRoot *models.CertifiedState[StateT], + finalizationCallback consensus.Finalizer, + notifier consensus.FollowerConsumer[StateT, VoteT], +) (*Forks[StateT, VoteT], error) { + if (trustedRoot.State.Identifier != trustedRoot.CertifyingQuorumCertificate.GetSelector()) || + (trustedRoot.State.Rank != trustedRoot.CertifyingQuorumCertificate.GetRank()) { + return nil, + models.NewConfigurationErrorf( + "invalid root: root QC is not pointing to root state", + ) + } + + forks := Forks[StateT, VoteT]{ + finalizationCallback: finalizationCallback, + notifier: notifier, + forest: *forest.NewLevelledForest(trustedRoot.State.Rank), + trustedRoot: trustedRoot, + finalityProof: nil, + } + + // verify and add root state to levelled forest + err := forks.EnsureStateIsValidExtension(trustedRoot.State) + if err != nil { + return nil, fmt.Errorf( + "invalid root state %v: %w", + trustedRoot.Identifier(), + err, + ) + } + forks.forest.AddVertex(ToStateContainer2[StateT](trustedRoot.State)) + return &forks, nil +} + +// FinalizedRank returns the largest rank number where a finalized state is +// known +func (f *Forks[StateT, VoteT]) FinalizedRank() uint64 { + if f.finalityProof == nil { + return f.trustedRoot.State.Rank + } + return f.finalityProof.State.Rank +} + +// FinalizedState returns the finalized state with the largest rank number +func (f *Forks[StateT, VoteT]) FinalizedState() *models.State[StateT] { + if f.finalityProof == nil { + return f.trustedRoot.State + } + return f.finalityProof.State +} + +// FinalityProof returns the latest finalized state and a certified child from +// the subsequent rank, which proves finality. +// CAUTION: method returns (nil, false), when Forks has not yet finalized any +// states beyond the finalized root state it was initialized with. +func (f *Forks[StateT, VoteT]) FinalityProof() ( + *consensus.FinalityProof[StateT], + bool, +) { + return f.finalityProof, f.finalityProof != nil +} + +// GetState returns (*models.State, true) if the state with the specified +// id was found and (nil, false) otherwise. +func (f *Forks[StateT, VoteT]) GetState(stateID models.Identity) ( + *models.State[StateT], + bool, +) { + stateContainer, hasState := f.forest.GetVertex(stateID) + if !hasState { + return nil, false + } + return stateContainer.(*StateContainer[StateT]).GetState(), true +} + +// GetStatesForRank returns all known states for the given rank +func (f *Forks[StateT, VoteT]) GetStatesForRank( + rank uint64, +) []*models.State[StateT] { + vertexIterator := f.forest.GetVerticesAtLevel(rank) + // in the vast majority of cases, there will only be one proposal for a + // particular rank + states := make([]*models.State[StateT], 0, 1) + for vertexIterator.HasNext() { + v := vertexIterator.NextVertex() + states = append(states, v.(*StateContainer[StateT]).GetState()) + } + return states +} + +// IsKnownState checks whether state is known. +func (f *Forks[StateT, VoteT]) IsKnownState(stateID models.Identity) bool { + _, hasState := f.forest.GetVertex(stateID) + return hasState +} + +// IsProcessingNeeded determines whether the given state needs processing, +// based on the state's rank and hash. +// Returns false if any of the following conditions applies +// - state rank is _below_ the most recently finalized state +// - the state already exists in the consensus state +// +// UNVALIDATED: expects state to pass Forks.EnsureStateIsValidExtension(state) +func (f *Forks[StateT, VoteT]) IsProcessingNeeded(state *models.State[StateT]) bool { + if state.Rank < f.FinalizedRank() || f.IsKnownState(state.Identifier) { + return false + } + return true +} + +// EnsureStateIsValidExtension checks that the given state is a valid extension +// to the tree of states already stored (no state modifications). Specifically, +// the following conditions are enforced, which are critical to the correctness +// of Forks: +// +// 1. If a state with the same ID is already stored, their ranks must be +// identical. +// 2. The state's rank must be strictly larger than the rank of its parent. +// 3. The parent must already be stored (or below the pruning height). +// +// Exclusions to these rules (by design): +// Let W denote the rank of state's parent (i.e. W := state.QC.Rank) and F the +// latest finalized rank. +// +// (i) If state.Rank < F, adding the state would be a no-op. Such states are +// considered compatible (principle of vacuous truth), i.e. we skip +// checking 1, 2, 3. +// (ii) If state.Rank == F, we do not inspect the QC / parent at all (skip 2 +// and 3). This exception is important for compatability with genesis or +// spork-root states, which do not contain a QC. +// (iii) If state.Rank > F, but state.QC.Rank < F the parent has already been +// pruned. In this case, we omit rule 3. (principle of vacuous truth +// applied to the parent) +// +// We assume that all states are fully verified. A valid state must satisfy all +// consistency requirements; otherwise we have a bug in the compliance layer. +// +// Error returns: +// - models.MissingStateError if the parent of the input proposal does not +// exist in the forest (but is above the pruned rank). Represents violation +// of condition 3. +// - models.InvalidStateError if the state violates condition 1. or 2. +// - generic error in case of unexpected bug or internal state corruption +func (f *Forks[StateT, VoteT]) EnsureStateIsValidExtension( + state *models.State[StateT], +) error { + if state.Rank < f.forest.LowestLevel { // exclusion (i) + return nil + } + + // LevelledForest enforces conditions 1. and 2. including the respective + // exclusions (ii) and (iii). + stateContainer := ToStateContainer2[StateT](state) + err := f.forest.VerifyVertex(stateContainer) + if err != nil { + if forest.IsInvalidVertexError(err) { + return models.NewInvalidStateErrorf( + state, + "not a valid vertex for state tree: %w", + err, + ) + } + return fmt.Errorf( + "state tree generated unexpected error validating vertex: %w", + err, + ) + } + + // Condition 3: + // LevelledForest implements a more generalized algorithm that also works for + // disjoint graphs. Therefore, LevelledForest _not_ enforce condition 3. Here, + // we additionally require that the pending states form a tree (connected + // graph), i.e. we need to enforce condition 3 + if (state.Rank == f.forest.LowestLevel) || + (state.ParentQuorumCertificate.GetRank() < f.forest.LowestLevel) { // exclusion (ii) and (iii) + return nil + } + // For a state whose parent is _not_ below the pruning height, we expect the + // parent to be known. + _, isParentKnown := f.forest.GetVertex( + state.ParentQuorumCertificate.GetSelector(), + ) + if !isParentKnown { // missing parent + return models.MissingStateError{ + Rank: state.ParentQuorumCertificate.GetRank(), + Identifier: state.ParentQuorumCertificate.GetSelector(), + } + } + return nil +} + +// AddCertifiedState[StateT] appends the given certified state to the tree of +// pending states and updates the latest finalized state (if finalization +// progressed). Unless the parent is below the pruning threshold (latest +// finalized rank), we require that the parent is already stored in Forks. +// Calling this method with previously processed states leaves the consensus +// state invariant (though, it will potentially cause some duplicate +// processing). +// +// Possible error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.InvalidStateError if the state is invalid (see +// `Forks.EnsureStateIsValidExtension` for details). From the perspective of +// Forks, this error is benign (no-op). However, we assume all states are +// fully verified, i.e. they should satisfy all consistency requirements. +// Hence, this error is likely an indicator of a bug in the compliance +// layer. +// - models.ByzantineThresholdExceededError if conflicting QCs or conflicting +// finalized states have been detected (violating a foundational consensus +// guarantees). This indicates that there are 1/3+ Byzantine nodes (weighted +// by seniority) in the network, breaking the safety guarantees of HotStuff +// (or there is a critical bug / data corruption). Forks cannot recover from +// this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) AddCertifiedState( + certifiedState *models.CertifiedState[StateT], +) error { + if !f.IsProcessingNeeded(certifiedState.State) { + return nil + } + + // Check proposal for byzantine evidence, store it and emit + // `OnStateIncorporated` notification. Note: `checkForByzantineEvidence` only + // inspects the state, but _not_ its certifying QC. Hence, we have to + // additionally check here, whether the certifying QC conflicts with any known + // QCs. + err := f.checkForByzantineEvidence(certifiedState.State) + if err != nil { + return fmt.Errorf( + "cannot check for Byzantine evidence in certified state %v: %w", + certifiedState.State.Identifier, + err, + ) + } + err = f.checkForConflictingQCs(&certifiedState.CertifyingQuorumCertificate) + if err != nil { + return fmt.Errorf( + "certifying QC for state %v failed check for conflicts: %w", + certifiedState.State.Identifier, + err, + ) + } + f.forest.AddVertex(ToStateContainer2[StateT](certifiedState.State)) + f.notifier.OnStateIncorporated(certifiedState.State) + + // Update finality status: + err = f.checkForAdvancingFinalization(certifiedState) + if err != nil { + return fmt.Errorf("updating finalization failed: %w", err) + } + return nil +} + +// AddValidatedState appends the validated state to the tree of pending +// states and updates the latest finalized state (if applicable). Unless the +// parent is below the pruning threshold (latest finalized rank), we require +// that the parent is already stored in Forks. Calling this method with +// previously processed states leaves the consensus state invariant (though, it +// will potentially cause some duplicate processing). +// Notes: +// - Method `AddCertifiedState[StateT](..)` should be used preferably, if a QC +// certifying `state` is already known. This is generally the case for the +// consensus follower. Method `AddValidatedState` is intended for active +// consensus participants, which fully validate states (incl. payload), i.e. +// QCs are processed as part of validated proposals. +// +// Possible error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.InvalidStateError if the state is invalid (see +// `Forks.EnsureStateIsValidExtension` for details). From the perspective of +// Forks, this error is benign (no-op). However, we assume all states are +// fully verified, i.e. they should satisfy all consistency requirements. +// Hence, this error is likely an indicator of a bug in the compliance +// layer. +// - models.ByzantineThresholdExceededError if conflicting QCs or conflicting +// finalized states have been detected (violating a foundational consensus +// guarantees). This indicates that there are 1/3+ Byzantine nodes (weighted +// by seniority) in the network, breaking the safety guarantees of HotStuff +// (or there is a critical bug / data corruption). Forks cannot recover from +// this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) AddValidatedState( + proposal *models.State[StateT], +) error { + if !f.IsProcessingNeeded(proposal) { + return nil + } + + // Check proposal for byzantine evidence, store it and emit + // `OnStateIncorporated` notification: + err := f.checkForByzantineEvidence(proposal) + if err != nil { + return fmt.Errorf( + "cannot check Byzantine evidence for state %v: %w", + proposal.Identifier, + err, + ) + } + f.forest.AddVertex(ToStateContainer2[StateT](proposal)) + f.notifier.OnStateIncorporated(proposal) + + // Update finality status: In the implementation, our notion of finality is + // based on certified states. + // The certified parent essentially combines the parent, with the QC contained + // in state, to drive finalization. + parent, found := f.GetState(proposal.ParentQuorumCertificate.GetSelector()) + if !found { + // Not finding the parent means it is already pruned; hence this state does + // not change the finalization state. + return nil + } + certifiedParent, err := models.NewCertifiedState[StateT]( + parent, + proposal.ParentQuorumCertificate, + ) + if err != nil { + return fmt.Errorf( + "mismatching QC with parent (corrupted Forks state):%w", + err, + ) + } + err = f.checkForAdvancingFinalization(&certifiedParent) + if err != nil { + return fmt.Errorf("updating finalization failed: %w", err) + } + return nil +} + +// checkForByzantineEvidence inspects whether the given `state` together with +// the already known information yields evidence of byzantine behaviour. +// Furthermore, the method enforces that `state` is a valid extension of the +// tree of pending states. If the state is a double proposal, we emit an +// `OnStateIncorporated` notification. Though, provided the state is a valid +// extension of the state tree by itself, it passes this method without an +// error. +// +// Possible error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.InvalidStateError if the state is invalid (see +// `Forks.EnsureStateIsValidExtension` for details). From the perspective of +// Forks, this error is benign (no-op). However, we assume all states are +// fully verified, i.e. they should satisfy all consistency requirements. +// Hence, this error is likely an indicator of a bug in the compliance +// layer. +// - models.ByzantineThresholdExceededError if conflicting QCs have been +// detected. Forks cannot recover from this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) checkForByzantineEvidence( + state *models.State[StateT], +) error { + err := f.EnsureStateIsValidExtension(state) + if err != nil { + return fmt.Errorf("consistency check on state failed: %w", err) + } + err = f.checkForConflictingQCs(&state.ParentQuorumCertificate) + if err != nil { + return fmt.Errorf("checking QC for conflicts failed: %w", err) + } + f.checkForDoubleProposal(state) + return nil +} + +// checkForConflictingQCs checks if QC conflicts with a stored Quorum +// Certificate. In case a conflicting QC is found, an +// ByzantineThresholdExceededError is returned. Two Quorum Certificates q1 and +// q2 are defined as conflicting iff: +// +// q1.Rank == q2.Rank AND q1.Identifier ≠ q2.Identifier +// +// This means there are two Quorums for conflicting states at the same rank. +// Per 'Observation 1' from the Jolteon paper https://arxiv.org/pdf/2106.10362v1.pdf, +// two conflicting QCs can exist if and only if the Byzantine threshold is +// exceeded. +// Error returns: +// - models.ByzantineThresholdExceededError if conflicting QCs have been +// detected. Forks cannot recover from this exception. +// - All other errors are potential symptoms of bugs or state corruption. +func (f *Forks[StateT, VoteT]) checkForConflictingQCs( + qc *models.QuorumCertificate, +) error { + it := f.forest.GetVerticesAtLevel((*qc).GetRank()) + for it.HasNext() { + otherState := it.NextVertex() // by construction, must have same rank as qc.Rank + if (*qc).GetSelector() != otherState.VertexID() { + // * we have just found another state at the same rank number as qc.Rank + // but with different hash + // * if this state has a child c, this child will have + // c.qc.rank = parentRank + // c.qc.ID != parentIdentifier + // => conflicting qc + otherChildren := f.forest.GetChildren(otherState.VertexID()) + if otherChildren.HasNext() { + otherChild := otherChildren.NextVertex().(*StateContainer[StateT]).GetState() + conflictingQC := otherChild.ParentQuorumCertificate + return models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "conflicting QCs at rank %d: %v and %v", + (*qc).GetRank(), (*qc).GetSelector(), conflictingQC.GetSelector(), + )} + } + } + } + return nil +} + +// checkForDoubleProposal checks if the input proposal is a double proposal. +// A double proposal occurs when two proposals with the same rank exist in +// Forks. If there is a double proposal, notifier.OnDoubleProposeDetected is +// triggered. +func (f *Forks[StateT, VoteT]) checkForDoubleProposal( + state *models.State[StateT], +) { + it := f.forest.GetVerticesAtLevel(state.Rank) + for it.HasNext() { + otherVertex := it.NextVertex() // by construction, must have same rank as state + otherState := otherVertex.(*StateContainer[StateT]).GetState() + if state.Identifier != otherState.Identifier { + f.notifier.OnDoubleProposeDetected(state, otherState) + } + } +} + +// checkForAdvancingFinalization checks whether observing certifiedState leads +// to progress of finalization. This function should be called every time a new +// state is added to Forks. If the new state is the head of a 2-chain satisfying +// the finalization rule, we update `Forks.finalityProof` to the new latest +// finalized state. Calling this method with previously-processed states leaves +// the consensus state invariant. +// UNVALIDATED: assumes that relevant state properties are consistent with +// previous states +// Error returns: +// - models.MissingStateError if the parent does not exist in the forest (but +// is above the pruned rank). From the perspective of Forks, this error is +// benign (no-op). +// - models.ByzantineThresholdExceededError in case we detect a finalization +// fork (violating a foundational consensus guarantee). This indicates that +// there are 1/3+ Byzantine nodes (weighted by seniority) in the network, +// breaking the safety guarantees of HotStuff (or there is a critical bug / +// data corruption). Forks cannot recover from this exception. +// - generic error in case of unexpected bug or internal state corruption +func (f *Forks[StateT, VoteT]) checkForAdvancingFinalization( + certifiedState *models.CertifiedState[StateT], +) error { + // We prune all states in forest which are below the most recently finalized + // state. Hence, we have a pruned ancestry if and only if either of the + // following conditions applies: + // (a) If a state's parent rank (i.e. state.QC.Rank) is below the most + // recently finalized state. + // (b) If a state's rank is equal to the most recently finalized state. + // Caution: + // * Under normal operation, case (b) is covered by the logic for case (a) + // * However, the existence of a genesis state requires handling case (b) + // explicitly: + // The root state is specified and trusted by the node operator. If the root + // state is the genesis state, it might not contain a QC pointing to a + // parent (as there is no parent). In this case, condition (a) cannot be + // evaluated. + lastFinalizedRank := f.FinalizedRank() + if (certifiedState.Rank() <= lastFinalizedRank) || + (certifiedState.State.ParentQuorumCertificate.GetRank() < lastFinalizedRank) { + // Repeated states are expected during normal operations. We enter this code + // state if and only if the parent's rank is _below_ the last finalized + // state. It is straight forward to show: + // Lemma: Let B be a state whose 2-chain reaches beyond the last finalized + // state => B will not update the locked or finalized state + return nil + } + + // retrieve parent; always expected to succeed, because we passed the checks + // above + qcForParent := certifiedState.State.ParentQuorumCertificate + parentVertex, parentStateKnown := f.forest.GetVertex( + qcForParent.GetSelector(), + ) + if !parentStateKnown { + return models.MissingStateError{ + Rank: qcForParent.GetRank(), + Identifier: qcForParent.GetSelector(), + } + } + parentState := parentVertex.(*StateContainer[StateT]).GetState() + + // Note: we assume that all stored states pass + // Forks.EnsureStateIsValidExtension(state); specifically, that state's + // RankNumber is strictly monotonically increasing which is enforced by + // LevelledForest.VerifyVertex(...) + // We denote: + // * a DIRECT 1-chain as '<-' + // * a general 1-chain as '<~' (direct or indirect) + // Jolteon's rule for finalizing `parentState` is + // parentState <- State <~ certifyingQC (i.e. a DIRECT 1-chain PLUS + // ╰─────────────────────╯ any 1-chain) + // certifiedState + // Hence, we can finalize `parentState` as head of a 2-chain, + // if and only if `State.Rank` is exactly 1 higher than the rank of + // `parentState` + if parentState.Rank+1 != certifiedState.Rank() { + return nil + } + + // `parentState` is now finalized: + // * While Forks is single-threaded, there is still the possibility of + // reentrancy. Specifically, the consumers of our finalization events are + // served by the goroutine executing Forks. It is conceivable that a + // consumer might access Forks and query the latest finalization proof. + // This would be legal, if the component supplying the goroutine to Forks + // also consumes the notifications. + // * Therefore, for API safety, we want to first update Fork's + // `finalityProof` before we emit any notifications. + + // Advancing finalization step (i): we collect all states for finalization (no + // notifications are emitted) + statesToBeFinalized, err := f.collectStatesForFinalization(&qcForParent) + if err != nil { + return fmt.Errorf( + "advancing finalization to state %v from rank %d failed: %w", + qcForParent.GetSelector(), + qcForParent.GetRank(), + err, + ) + } + + // Advancing finalization step (ii): update `finalityProof` and prune + // `LevelledForest` + f.finalityProof = &consensus.FinalityProof[StateT]{ + State: parentState, + CertifiedChild: certifiedState, + } + err = f.forest.PruneUpToLevel(f.FinalizedRank()) + if err != nil { + return fmt.Errorf("pruning levelled forest failed unexpectedly: %w", err) + } + + // Advancing finalization step (iii): iterate over the states from (i) and + // emit finalization events + for _, b := range statesToBeFinalized { + // first notify other critical components about finalized state - all errors + // returned here are fatal exceptions + err = f.finalizationCallback.MakeFinal(b.Identifier) + if err != nil { + return fmt.Errorf("finalization error in other component: %w", err) + } + + // notify less important components about finalized state + f.notifier.OnFinalizedState(b) + } + return nil +} + +// collectStatesForFinalization collects and returns all newly finalized states +// up to (and including) the state pointed to by `qc`. The states are listed in +// order of increasing height. +// Error returns: +// - models.ByzantineThresholdExceededError in case we detect a finalization +// fork (violating a foundational consensus guarantee). This indicates that +// there are 1/3+ Byzantine nodes (weighted by seniority) in the network, +// breaking the safety guarantees of HotStuff (or there is a critical bug / +// data corruption). Forks cannot recover from this exception. +// - generic error in case of bug or internal state corruption +func (f *Forks[StateT, VoteT]) collectStatesForFinalization( + qc *models.QuorumCertificate, +) ([]*models.State[StateT], error) { + lastFinalized := f.FinalizedState() + if (*qc).GetRank() < lastFinalized.Rank { + return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "finalizing state with rank %d which is lower than previously finalized state at rank %d", + (*qc).GetRank(), lastFinalized.Rank, + )} + } + if (*qc).GetRank() == lastFinalized.Rank { // no new states to be finalized + return nil, nil + } + + // Collect all states that are pending finalization in slice. While we crawl + // the states starting from the newest finalized state backwards (decreasing + // ranks), we would like to return them in order of _increasing_ rank. + // Therefore, we fill the slice starting with the highest index. + l := (*qc).GetRank() - lastFinalized.Rank // l is an upper limit to the number of states that can be maximally finalized + statesToBeFinalized := make([]*models.State[StateT], l) + for (*qc).GetRank() > lastFinalized.Rank { + b, ok := f.GetState((*qc).GetSelector()) + if !ok { + return nil, fmt.Errorf( + "failed to get state (rank=%d, stateID=%x) for finalization", + (*qc).GetRank(), + (*qc).GetSelector(), + ) + } + l-- + statesToBeFinalized[l] = b + qc = &b.ParentQuorumCertificate // move to parent + } + // Now, `l` is the index where we stored the oldest state that should be + // finalized. Note that `l` might be larger than zero, if some ranks have no + // finalized states. Hence, `statesToBeFinalized` might start with nil + // entries, which we remove: + statesToBeFinalized = statesToBeFinalized[l:] + + // qc should now point to the latest finalized state. Otherwise, the + // consensus committee is compromised (or we have a critical internal bug). + if (*qc).GetRank() < lastFinalized.Rank { + return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "finalizing state with rank %d which is lower than previously finalized state at rank %d", + (*qc).GetRank(), lastFinalized.Rank, + )} + } + if (*qc).GetRank() == lastFinalized.Rank && + lastFinalized.Identifier != (*qc).GetSelector() { + return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( + "finalizing states with rank %d at conflicting forks: %x and %x", + (*qc).GetRank(), (*qc).GetSelector(), lastFinalized.Identifier, + )} + } + + return statesToBeFinalized, nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) + diff --git a/consensus/forks/state_container.go b/consensus/forks/state_container.go new file mode 100644 index 0000000..a716edf --- /dev/null +++ b/consensus/forks/state_container.go @@ -0,0 +1,77 @@ +package forks + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/forest" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateContainer wraps a state proposal to implement forest.Vertex +// so the proposal can be stored in forest.LevelledForest +type StateContainer[StateT models.Unique] models.State[StateT] + +var _ forest.Vertex = (*StateContainer[*nilUnique])(nil) + +func ToStateContainer2[StateT models.Unique]( + state *models.State[StateT], +) *StateContainer[StateT] { + return (*StateContainer[StateT])(state) +} + +func (b *StateContainer[StateT]) GetState() *models.State[StateT] { + return (*models.State[StateT])(b) +} + +// Functions implementing forest.Vertex +func (b *StateContainer[StateT]) VertexID() models.Identity { + return b.Identifier +} + +func (b *StateContainer[StateT]) Level() uint64 { + return b.Rank +} + +func (b *StateContainer[StateT]) Parent() (models.Identity, uint64) { + // Caution: not all states have a QC for the parent, such as the spork root + // states. Per API contract, we are obliged to return a value to prevent + // panics during logging. (see vertex `forest.VertexToString` method). + if b.ParentQuorumCertificate == nil { + return "", 0 + } + return b.ParentQuorumCertificate.GetSelector(), + b.ParentQuorumCertificate.GetRank() +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/go.mod b/consensus/go.mod index fe7ec5b..24ff489 100644 --- a/consensus/go.mod +++ b/consensus/go.mod @@ -1,16 +1,9 @@ module source.quilibrium.com/quilibrium/monorepo/consensus -go 1.23.0 +go 1.23.2 toolchain go1.23.4 -replace source.quilibrium.com/quilibrium/monorepo/protobufs => ../protobufs - -replace source.quilibrium.com/quilibrium/monorepo/types => ../types - -replace source.quilibrium.com/quilibrium/monorepo/config => ../config - -replace source.quilibrium.com/quilibrium/monorepo/utils => ../utils replace github.com/multiformats/go-multiaddr => ../go-multiaddr @@ -20,13 +13,36 @@ replace github.com/libp2p/go-libp2p => ../go-libp2p replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht -replace source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub => ../go-libp2p-blossomsub - require go.uber.org/zap v1.27.0 require ( - github.com/stretchr/testify v1.10.0 // indirect + github.com/stretchr/testify v1.10.0 + github.com/cloudflare/circl v1.6.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/iden3/go-iden3-crypto v0.0.17 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr v0.16.1 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.1 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect go.uber.org/multierr v1.11.0 // indirect -) - -require github.com/pkg/errors v0.9.1 + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.72.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect + lukechampine.com/blake3 v1.4.1 // indirect + github.com/pkg/errors v0.9.1 +) \ No newline at end of file diff --git a/consensus/go.sum b/consensus/go.sum index 63e45b1..a1b6968 100644 --- a/consensus/go.sum +++ b/consensus/go.sum @@ -1,16 +1,92 @@ +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY= +github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= +github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= diff --git a/consensus/helper/quorum_certificate.go b/consensus/helper/quorum_certificate.go new file mode 100644 index 0000000..76b78d8 --- /dev/null +++ b/consensus/helper/quorum_certificate.go @@ -0,0 +1,120 @@ +package helper + +import ( + "bytes" + crand "crypto/rand" + "math/rand" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type TestAggregatedSignature struct { + Signature []byte + PublicKey []byte + Bitmask []byte +} + +func (t *TestAggregatedSignature) GetSignature() []byte { + return t.Signature +} + +func (t *TestAggregatedSignature) GetPublicKey() []byte { + return t.PublicKey +} + +func (t *TestAggregatedSignature) GetBitmask() []byte { + return t.Bitmask +} + +type TestQuorumCertificate struct { + Filter []byte + Rank uint64 + FrameNumber uint64 + Selector models.Identity + Timestamp int64 + AggregatedSignature models.AggregatedSignature +} + +func (t *TestQuorumCertificate) GetFilter() []byte { + return t.Filter +} + +func (t *TestQuorumCertificate) GetRank() uint64 { + return t.Rank +} + +func (t *TestQuorumCertificate) GetFrameNumber() uint64 { + return t.FrameNumber +} + +func (t *TestQuorumCertificate) GetSelector() models.Identity { + return t.Selector +} + +func (t *TestQuorumCertificate) GetTimestamp() int64 { + return t.Timestamp +} + +func (t *TestQuorumCertificate) GetAggregatedSignature() models.AggregatedSignature { + return t.AggregatedSignature +} + +func (t *TestQuorumCertificate) Equals(other models.QuorumCertificate) bool { + return bytes.Equal(t.Filter, other.GetFilter()) && + t.Rank == other.GetRank() && + t.FrameNumber == other.GetFrameNumber() && + t.Selector == other.GetSelector() && + t.Timestamp == other.GetTimestamp() && + bytes.Equal( + t.AggregatedSignature.GetBitmask(), + other.GetAggregatedSignature().GetBitmask(), + ) && + bytes.Equal( + t.AggregatedSignature.GetPublicKey(), + other.GetAggregatedSignature().GetPublicKey(), + ) && + bytes.Equal( + t.AggregatedSignature.GetSignature(), + other.GetAggregatedSignature().GetSignature(), + ) +} + +func MakeQC(options ...func(*TestQuorumCertificate)) models.QuorumCertificate { + s := make([]byte, 32) + crand.Read(s) + qc := &TestQuorumCertificate{ + Rank: rand.Uint64(), + FrameNumber: rand.Uint64() + 1, + Selector: string(s), + Timestamp: time.Now().UnixMilli(), + AggregatedSignature: &TestAggregatedSignature{ + PublicKey: make([]byte, 585), + Signature: make([]byte, 74), + Bitmask: []byte{0x01}, + }, + } + for _, option := range options { + option(qc) + } + return qc +} + +func WithQCState[StateT models.Unique](state *models.State[StateT]) func(TestQuorumCertificate) { + return func(qc TestQuorumCertificate) { + qc.Rank = state.Rank + qc.Selector = state.Identifier + } +} + +func WithQCSigners(signerIndices []byte) func(TestQuorumCertificate) { + return func(qc TestQuorumCertificate) { + qc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices + } +} + +func WithQCRank(rank uint64) func(*TestQuorumCertificate) { + return func(qc *TestQuorumCertificate) { + qc.Rank = rank + } +} diff --git a/consensus/helper/state.go b/consensus/helper/state.go new file mode 100644 index 0000000..e2c71a9 --- /dev/null +++ b/consensus/helper/state.go @@ -0,0 +1,112 @@ +package helper + +import ( + crand "crypto/rand" + "math/rand" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +func MakeIdentity() models.Identity { + s := make([]byte, 32) + crand.Read(s) + return models.Identity(s) +} + +func MakeState[StateT models.Unique](options ...func(*models.State[StateT])) *models.State[StateT] { + rank := rand.Uint64() + + state := models.State[StateT]{ + Rank: rank, + Identifier: MakeIdentity(), + ProposerID: MakeIdentity(), + Timestamp: uint64(time.Now().UnixMilli()), + ParentQuorumCertificate: MakeQC(WithQCRank(rank - 1)), + } + for _, option := range options { + option(&state) + } + return &state +} + +func WithStateRank[StateT models.Unique](rank uint64) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.Rank = rank + } +} + +func WithStateProposer[StateT models.Unique](proposerID models.Identity) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ProposerID = proposerID + } +} + +func WithParentState[StateT models.Unique](parent *models.State[StateT]) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ParentQuorumCertificate.(*TestQuorumCertificate).Selector = parent.Identifier + state.ParentQuorumCertificate.(*TestQuorumCertificate).Rank = parent.Rank + } +} + +func WithParentSigners[StateT models.Unique](signerIndices []byte) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ParentQuorumCertificate.(*TestQuorumCertificate).AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices + } +} + +func WithStateQC[StateT models.Unique](qc models.QuorumCertificate) func(*models.State[StateT]) { + return func(state *models.State[StateT]) { + state.ParentQuorumCertificate = qc + } +} + +func MakeVote[VoteT models.Unique]() *VoteT { + return new(VoteT) +} + +func MakeSignedProposal[StateT models.Unique, VoteT models.Unique](options ...func(*models.SignedProposal[StateT, VoteT])) *models.SignedProposal[StateT, VoteT] { + proposal := &models.SignedProposal[StateT, VoteT]{ + Proposal: *MakeProposal[StateT](), + Vote: MakeVote[VoteT](), + } + for _, option := range options { + option(proposal) + } + return proposal +} + +func MakeProposal[StateT models.Unique](options ...func(*models.Proposal[StateT])) *models.Proposal[StateT] { + proposal := &models.Proposal[StateT]{ + State: MakeState[StateT](), + PreviousRankTimeoutCertificate: nil, + } + for _, option := range options { + option(proposal) + } + return proposal +} + +func WithProposal[StateT models.Unique, VoteT models.Unique](proposal *models.Proposal[StateT]) func(*models.SignedProposal[StateT, VoteT]) { + return func(signedProposal *models.SignedProposal[StateT, VoteT]) { + signedProposal.Proposal = *proposal + } +} + +func WithState[StateT models.Unique](state *models.State[StateT]) func(*models.Proposal[StateT]) { + return func(proposal *models.Proposal[StateT]) { + proposal.State = state + } +} + +func WithVote[StateT models.Unique, VoteT models.Unique](vote *VoteT) func(*models.SignedProposal[StateT, VoteT]) { + return func(proposal *models.SignedProposal[StateT, VoteT]) { + proposal.Vote = vote + } +} + +func WithPreviousRankTimeoutCertificate[StateT models.Unique](previousRankTimeoutCert models.TimeoutCertificate) func(*models.Proposal[StateT]) { + return func(proposal *models.Proposal[StateT]) { + proposal.PreviousRankTimeoutCertificate = previousRankTimeoutCert + } +} diff --git a/consensus/helper/timeout_certificate.go b/consensus/helper/timeout_certificate.go new file mode 100644 index 0000000..4f8875a --- /dev/null +++ b/consensus/helper/timeout_certificate.go @@ -0,0 +1,153 @@ +package helper + +import ( + "bytes" + crand "crypto/rand" + "math/rand" + "slices" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type TestTimeoutCertificate struct { + Filter []byte + Rank uint64 + LatestRanks []uint64 + LatestQuorumCert models.QuorumCertificate + AggregatedSignature models.AggregatedSignature +} + +func (t *TestTimeoutCertificate) GetFilter() []byte { + return t.Filter +} + +func (t *TestTimeoutCertificate) GetRank() uint64 { + return t.Rank +} + +func (t *TestTimeoutCertificate) GetLatestRanks() []uint64 { + return t.LatestRanks +} + +func (t *TestTimeoutCertificate) GetLatestQuorumCert() models.QuorumCertificate { + return t.LatestQuorumCert +} + +func (t *TestTimeoutCertificate) GetAggregatedSignature() models.AggregatedSignature { + return t.AggregatedSignature +} + +func (t *TestTimeoutCertificate) Equals(other models.TimeoutCertificate) bool { + return bytes.Equal(t.Filter, other.GetFilter()) && + t.Rank == other.GetRank() && + slices.Equal(t.LatestRanks, other.GetLatestRanks()) && + t.LatestQuorumCert.Equals(other.GetLatestQuorumCert()) && + bytes.Equal( + t.AggregatedSignature.GetBitmask(), + other.GetAggregatedSignature().GetBitmask(), + ) && + bytes.Equal( + t.AggregatedSignature.GetPublicKey(), + other.GetAggregatedSignature().GetPublicKey(), + ) && + bytes.Equal( + t.AggregatedSignature.GetSignature(), + other.GetAggregatedSignature().GetSignature(), + ) +} + +func MakeTC(options ...func(*TestTimeoutCertificate)) models.TimeoutCertificate { + tcRank := rand.Uint64() + s := make([]byte, 32) + crand.Read(s) + qc := MakeQC(WithQCRank(tcRank - 1)) + highQCRanks := make([]uint64, 3) + for i := range highQCRanks { + highQCRanks[i] = qc.GetRank() + } + tc := &TestTimeoutCertificate{ + Rank: tcRank, + LatestQuorumCert: qc, + LatestRanks: highQCRanks, + AggregatedSignature: &TestAggregatedSignature{ + Signature: make([]byte, 74), + PublicKey: make([]byte, 585), + Bitmask: []byte{0x01}, + }, + } + for _, option := range options { + option(tc) + } + return tc +} + +func WithTCNewestQC(qc models.QuorumCertificate) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.LatestQuorumCert = qc + tc.LatestRanks = []uint64{qc.GetRank()} + } +} + +func WithTCSigners(signerIndices []byte) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices + } +} + +func WithTCRank(rank uint64) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.Rank = rank + } +} + +func WithTCHighQCRanks(highQCRanks []uint64) func(*TestTimeoutCertificate) { + return func(tc *TestTimeoutCertificate) { + tc.LatestRanks = highQCRanks + } +} + +func TimeoutStateFixture[VoteT models.Unique]( + opts ...func(TimeoutState *models.TimeoutState[VoteT]), +) *models.TimeoutState[VoteT] { + timeoutRank := uint64(rand.Uint32()) + newestQC := MakeQC(WithQCRank(timeoutRank - 10)) + + timeout := &models.TimeoutState[VoteT]{ + Rank: timeoutRank, + LatestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: MakeTC( + WithTCRank(timeoutRank-1), + WithTCNewestQC(MakeQC(WithQCRank(newestQC.GetRank()))), + ), + } + + for _, opt := range opts { + opt(timeout) + } + + return timeout +} + +func WithTimeoutNewestQC[VoteT models.Unique]( + newestQC models.QuorumCertificate, +) func(*models.TimeoutState[VoteT]) { + return func(timeout *models.TimeoutState[VoteT]) { + timeout.LatestQuorumCertificate = newestQC + } +} + +func WithTimeoutPreviousRankTimeoutCertificate[VoteT models.Unique]( + previousRankTimeoutCert models.TimeoutCertificate, +) func(*models.TimeoutState[VoteT]) { + return func(timeout *models.TimeoutState[VoteT]) { + timeout.PriorRankTimeoutCertificate = previousRankTimeoutCert + } +} + +func WithTimeoutStateRank[VoteT models.Unique]( + rank uint64, +) func(*models.TimeoutState[VoteT]) { + return func(timeout *models.TimeoutState[VoteT]) { + timeout.Rank = rank + } +} diff --git a/consensus/mocks/communicator_consumer.go b/consensus/mocks/communicator_consumer.go new file mode 100644 index 0000000..550b034 --- /dev/null +++ b/consensus/mocks/communicator_consumer.go @@ -0,0 +1,44 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// CommunicatorConsumer is an autogenerated mock type for the CommunicatorConsumer type +type CommunicatorConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime +func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnProposal(proposal *models.SignedProposal[StateT, VoteT], targetPublicationTime time.Time) { + _m.Called(proposal, targetPublicationTime) +} + +// OnOwnTimeout provides a mock function with given fields: timeout +func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnTimeout(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnVote(vote *VoteT, recipientID models.Identity) { + _m.Called(vote, recipientID) +} + +// NewCommunicatorConsumer creates a new instance of CommunicatorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommunicatorConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *CommunicatorConsumer[StateT, VoteT] { + mock := &CommunicatorConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/consensus_store.go b/consensus/mocks/consensus_store.go new file mode 100644 index 0000000..ab77173 --- /dev/null +++ b/consensus/mocks/consensus_store.go @@ -0,0 +1,123 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ConsensusStore is an autogenerated mock type for the ConsensusStore type +type ConsensusStore[VoteT models.Unique] struct { + mock.Mock +} + +// GetConsensusState provides a mock function with no fields +func (_m *ConsensusStore[VoteT]) GetConsensusState() (*models.ConsensusState[VoteT], error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetConsensusState") + } + + var r0 *models.ConsensusState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func() (*models.ConsensusState[VoteT], error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *models.ConsensusState[VoteT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.ConsensusState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLivenessState provides a mock function with no fields +func (_m *ConsensusStore[VoteT]) GetLivenessState() (*models.LivenessState, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLivenessState") + } + + var r0 *models.LivenessState + var r1 error + if rf, ok := ret.Get(0).(func() (*models.LivenessState, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *models.LivenessState); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.LivenessState) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PutConsensusState provides a mock function with given fields: state +func (_m *ConsensusStore[VoteT]) PutConsensusState(state *models.ConsensusState[VoteT]) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for PutConsensusState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.ConsensusState[VoteT]) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PutLivenessState provides a mock function with given fields: state +func (_m *ConsensusStore[VoteT]) PutLivenessState(state *models.LivenessState) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for PutLivenessState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.LivenessState) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConsensusStore creates a new instance of ConsensusStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsensusStore[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ConsensusStore[VoteT] { + mock := &ConsensusStore[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/consumer.go b/consensus/mocks/consumer.go new file mode 100644 index 0000000..c04ba10 --- /dev/null +++ b/consensus/mocks/consumer.go @@ -0,0 +1,126 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// Consumer is an autogenerated mock type for the Consumer type +type Consumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnCurrentRankDetails provides a mock function with given fields: currentRank, finalizedRank, currentLeader +func (_m *Consumer[StateT, VoteT]) OnCurrentRankDetails(currentRank uint64, finalizedRank uint64, currentLeader models.Identity) { + _m.Called(currentRank, finalizedRank, currentLeader) +} + +// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1 +func (_m *Consumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) { + _m.Called(_a0, _a1) +} + +// OnEventProcessed provides a mock function with no fields +func (_m *Consumer[StateT, VoteT]) OnEventProcessed() { + _m.Called() +} + +// OnFinalizedState provides a mock function with given fields: _a0 +func (_m *Consumer[StateT, VoteT]) OnFinalizedState(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnInvalidStateDetected provides a mock function with given fields: err +func (_m *Consumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + _m.Called(err) +} + +// OnLocalTimeout provides a mock function with given fields: currentRank +func (_m *Consumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) { + _m.Called(currentRank) +} + +// OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime +func (_m *Consumer[StateT, VoteT]) OnOwnProposal(proposal *models.SignedProposal[StateT, VoteT], targetPublicationTime time.Time) { + _m.Called(proposal, targetPublicationTime) +} + +// OnOwnTimeout provides a mock function with given fields: timeout +func (_m *Consumer[StateT, VoteT]) OnOwnTimeout(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *Consumer[StateT, VoteT]) OnOwnVote(vote *VoteT, recipientID models.Identity) { + _m.Called(vote, recipientID) +} + +// OnPartialTimeoutCertificate provides a mock function with given fields: currentRank, partialTimeoutCertificate +func (_m *Consumer[StateT, VoteT]) OnPartialTimeoutCertificate(currentRank uint64, partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) { + _m.Called(currentRank, partialTimeoutCertificate) +} + +// OnQuorumCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, qc +func (_m *Consumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(oldRank uint64, newRank uint64, qc models.QuorumCertificate) { + _m.Called(oldRank, newRank, qc) +} + +// OnRankChange provides a mock function with given fields: oldRank, newRank +func (_m *Consumer[StateT, VoteT]) OnRankChange(oldRank uint64, newRank uint64) { + _m.Called(oldRank, newRank) +} + +// OnReceiveProposal provides a mock function with given fields: currentRank, proposal +func (_m *Consumer[StateT, VoteT]) OnReceiveProposal(currentRank uint64, proposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(currentRank, proposal) +} + +// OnReceiveQuorumCertificate provides a mock function with given fields: currentRank, qc +func (_m *Consumer[StateT, VoteT]) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) { + _m.Called(currentRank, qc) +} + +// OnReceiveTimeoutCertificate provides a mock function with given fields: currentRank, tc +func (_m *Consumer[StateT, VoteT]) OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) { + _m.Called(currentRank, tc) +} + +// OnStart provides a mock function with given fields: currentRank +func (_m *Consumer[StateT, VoteT]) OnStart(currentRank uint64) { + _m.Called(currentRank) +} + +// OnStartingTimeout provides a mock function with given fields: startTime, endTime +func (_m *Consumer[StateT, VoteT]) OnStartingTimeout(startTime time.Time, endTime time.Time) { + _m.Called(startTime, endTime) +} + +// OnStateIncorporated provides a mock function with given fields: _a0 +func (_m *Consumer[StateT, VoteT]) OnStateIncorporated(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnTimeoutCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, tc +func (_m *Consumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(oldRank uint64, newRank uint64, tc models.TimeoutCertificate) { + _m.Called(oldRank, newRank, tc) +} + +// NewConsumer creates a new instance of Consumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Consumer[StateT, VoteT] { + mock := &Consumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/dynamic_committee.go b/consensus/mocks/dynamic_committee.go new file mode 100644 index 0000000..e48b6a7 --- /dev/null +++ b/consensus/mocks/dynamic_committee.go @@ -0,0 +1,249 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// DynamicCommittee is an autogenerated mock type for the DynamicCommittee type +type DynamicCommittee struct { + mock.Mock +} + +// IdentitiesByRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for IdentitiesByRank") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) ([]models.WeightedIdentity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) []models.WeightedIdentity); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentitiesByState provides a mock function with given fields: stateID +func (_m *DynamicCommittee) IdentitiesByState(stateID models.Identity) ([]models.WeightedIdentity, error) { + ret := _m.Called(stateID) + + if len(ret) == 0 { + panic("no return value specified for IdentitiesByState") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity) ([]models.WeightedIdentity, error)); ok { + return rf(stateID) + } + if rf, ok := ret.Get(0).(func(models.Identity) []models.WeightedIdentity); ok { + r0 = rf(stateID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(models.Identity) error); ok { + r1 = rf(stateID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentityByRank provides a mock function with given fields: rank, participantID +func (_m *DynamicCommittee) IdentityByRank(rank uint64, participantID models.Identity) (models.WeightedIdentity, error) { + ret := _m.Called(rank, participantID) + + if len(ret) == 0 { + panic("no return value specified for IdentityByRank") + } + + var r0 models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.Identity) (models.WeightedIdentity, error)); ok { + return rf(rank, participantID) + } + if rf, ok := ret.Get(0).(func(uint64, models.Identity) models.WeightedIdentity); ok { + r0 = rf(rank, participantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.Identity) error); ok { + r1 = rf(rank, participantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentityByState provides a mock function with given fields: stateID, participantID +func (_m *DynamicCommittee) IdentityByState(stateID models.Identity, participantID models.Identity) (*models.WeightedIdentity, error) { + ret := _m.Called(stateID, participantID) + + if len(ret) == 0 { + panic("no return value specified for IdentityByState") + } + + var r0 *models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) (*models.WeightedIdentity, error)); ok { + return rf(stateID, participantID) + } + if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) *models.WeightedIdentity); ok { + r0 = rf(stateID, participantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(models.Identity, models.Identity) error); ok { + r1 = rf(stateID, participantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LeaderForRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) LeaderForRank(rank uint64) (models.Identity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for LeaderForRank") + } + + var r0 models.Identity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (models.Identity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) models.Identity); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(models.Identity) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QuorumThresholdForRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) QuorumThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for QuorumThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Self provides a mock function with no fields +func (_m *DynamicCommittee) Self() models.Identity { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Self") + } + + var r0 models.Identity + if rf, ok := ret.Get(0).(func() models.Identity); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(models.Identity) + } + + return r0 +} + +// TimeoutThresholdForRank provides a mock function with given fields: rank +func (_m *DynamicCommittee) TimeoutThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for TimeoutThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDynamicCommittee creates a new instance of DynamicCommittee. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDynamicCommittee(t interface { + mock.TestingT + Cleanup(func()) +}) *DynamicCommittee { + mock := &DynamicCommittee{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/event_handler.go b/consensus/mocks/event_handler.go new file mode 100644 index 0000000..459ba0f --- /dev/null +++ b/consensus/mocks/event_handler.go @@ -0,0 +1,162 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + mock "github.com/stretchr/testify/mock" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// EventHandler is an autogenerated mock type for the EventHandler type +type EventHandler[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnLocalTimeout provides a mock function with no fields +func (_m *EventHandler[StateT, VoteT]) OnLocalTimeout() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OnLocalTimeout") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: partialTimeoutCertificate +func (_m *EventHandler[StateT, VoteT]) OnPartialTimeoutCertificateCreated(partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) error { + ret := _m.Called(partialTimeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for OnPartialTimeoutCertificateCreated") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*consensus.PartialTimeoutCertificateCreated) error); ok { + r0 = rf(partialTimeoutCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnReceiveProposal provides a mock function with given fields: proposal +func (_m *EventHandler[StateT, VoteT]) OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for OnReceiveProposal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnReceiveQuorumCertificate provides a mock function with given fields: quorumCertificate +func (_m *EventHandler[StateT, VoteT]) OnReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) error { + ret := _m.Called(quorumCertificate) + + if len(ret) == 0 { + panic("no return value specified for OnReceiveQuorumCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok { + r0 = rf(quorumCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnReceiveTimeoutCertificate provides a mock function with given fields: timeoutCertificate +func (_m *EventHandler[StateT, VoteT]) OnReceiveTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error { + ret := _m.Called(timeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for OnReceiveTimeoutCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok { + r0 = rf(timeoutCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: ctx +func (_m *EventHandler[StateT, VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TimeoutChannel provides a mock function with no fields +func (_m *EventHandler[StateT, VoteT]) TimeoutChannel() <-chan time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TimeoutChannel") + } + + var r0 <-chan time.Time + if rf, ok := ret.Get(0).(func() <-chan time.Time); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan time.Time) + } + } + + return r0 +} + +// NewEventHandler creates a new instance of EventHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventHandler[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *EventHandler[StateT, VoteT] { + mock := &EventHandler[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/event_loop.go b/consensus/mocks/event_loop.go new file mode 100644 index 0000000..cbee098 --- /dev/null +++ b/consensus/mocks/event_loop.go @@ -0,0 +1,67 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// EventLoop is an autogenerated mock type for the EventLoop type +type EventLoop[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate +func (_m *EventLoop[StateT, VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) { + _m.Called(certificate) +} + +// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate +func (_m *EventLoop[StateT, VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC +func (_m *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) { + _m.Called(rank, newestQC, lastRankTC) +} + +// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0 +func (_m *EventLoop[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) { + _m.Called(_a0) +} + +// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate +func (_m *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnTimeoutProcessed provides a mock function with given fields: timeout +func (_m *EventLoop[StateT, VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// OnVoteProcessed provides a mock function with given fields: vote +func (_m *EventLoop[StateT, VoteT]) OnVoteProcessed(vote *VoteT) { + _m.Called(vote) +} + +// SubmitProposal provides a mock function with given fields: proposal +func (_m *EventLoop[StateT, VoteT]) SubmitProposal(proposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(proposal) +} + +// NewEventLoop creates a new instance of EventLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventLoop[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *EventLoop[StateT, VoteT] { + mock := &EventLoop[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/finalization_consumer.go b/consensus/mocks/finalization_consumer.go new file mode 100644 index 0000000..9e9a330 --- /dev/null +++ b/consensus/mocks/finalization_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// FinalizationConsumer is an autogenerated mock type for the FinalizationConsumer type +type FinalizationConsumer[StateT models.Unique] struct { + mock.Mock +} + +// OnFinalizedState provides a mock function with given fields: _a0 +func (_m *FinalizationConsumer[StateT]) OnFinalizedState(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnStateIncorporated provides a mock function with given fields: _a0 +func (_m *FinalizationConsumer[StateT]) OnStateIncorporated(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// NewFinalizationConsumer creates a new instance of FinalizationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizationConsumer[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *FinalizationConsumer[StateT] { + mock := &FinalizationConsumer[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/finalizer.go b/consensus/mocks/finalizer.go new file mode 100644 index 0000000..9e74b8c --- /dev/null +++ b/consensus/mocks/finalizer.go @@ -0,0 +1,45 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Finalizer is an autogenerated mock type for the Finalizer type +type Finalizer struct { + mock.Mock +} + +// MakeFinal provides a mock function with given fields: stateID +func (_m *Finalizer) MakeFinal(stateID models.Identity) error { + ret := _m.Called(stateID) + + if len(ret) == 0 { + panic("no return value specified for MakeFinal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.Identity) error); ok { + r0 = rf(stateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewFinalizer creates a new instance of Finalizer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizer(t interface { + mock.TestingT + Cleanup(func()) +}) *Finalizer { + mock := &Finalizer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/follower_consumer.go b/consensus/mocks/follower_consumer.go new file mode 100644 index 0000000..abc97c9 --- /dev/null +++ b/consensus/mocks/follower_consumer.go @@ -0,0 +1,47 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// FollowerConsumer is an autogenerated mock type for the FollowerConsumer type +type FollowerConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1 +func (_m *FollowerConsumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) { + _m.Called(_a0, _a1) +} + +// OnFinalizedState provides a mock function with given fields: _a0 +func (_m *FollowerConsumer[StateT, VoteT]) OnFinalizedState(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// OnInvalidStateDetected provides a mock function with given fields: err +func (_m *FollowerConsumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + _m.Called(err) +} + +// OnStateIncorporated provides a mock function with given fields: _a0 +func (_m *FollowerConsumer[StateT, VoteT]) OnStateIncorporated(_a0 *models.State[StateT]) { + _m.Called(_a0) +} + +// NewFollowerConsumer creates a new instance of FollowerConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFollowerConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *FollowerConsumer[StateT, VoteT] { + mock := &FollowerConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/follower_loop.go b/consensus/mocks/follower_loop.go new file mode 100644 index 0000000..8360ce5 --- /dev/null +++ b/consensus/mocks/follower_loop.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// FollowerLoop is an autogenerated mock type for the FollowerLoop type +type FollowerLoop[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// AddCertifiedState provides a mock function with given fields: certifiedState +func (_m *FollowerLoop[StateT, VoteT]) AddCertifiedState(certifiedState *models.CertifiedState[StateT]) { + _m.Called(certifiedState) +} + +// NewFollowerLoop creates a new instance of FollowerLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFollowerLoop[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *FollowerLoop[StateT, VoteT] { + mock := &FollowerLoop[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/forks.go b/consensus/mocks/forks.go new file mode 100644 index 0000000..6c00114 --- /dev/null +++ b/consensus/mocks/forks.go @@ -0,0 +1,183 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Forks is an autogenerated mock type for the Forks type +type Forks[StateT models.Unique] struct { + mock.Mock +} + +// AddCertifiedState provides a mock function with given fields: certifiedState +func (_m *Forks[StateT]) AddCertifiedState(certifiedState *models.CertifiedState[StateT]) error { + ret := _m.Called(certifiedState) + + if len(ret) == 0 { + panic("no return value specified for AddCertifiedState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.CertifiedState[StateT]) error); ok { + r0 = rf(certifiedState) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddValidatedState provides a mock function with given fields: proposal +func (_m *Forks[StateT]) AddValidatedState(proposal *models.State[StateT]) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for AddValidatedState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.State[StateT]) error); ok { + r0 = rf(proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FinalityProof provides a mock function with no fields +func (_m *Forks[StateT]) FinalityProof() (*consensus.FinalityProof[StateT], bool) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityProof") + } + + var r0 *consensus.FinalityProof[StateT] + var r1 bool + if rf, ok := ret.Get(0).(func() (*consensus.FinalityProof[StateT], bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *consensus.FinalityProof[StateT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*consensus.FinalityProof[StateT]) + } + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// FinalizedRank provides a mock function with no fields +func (_m *Forks[StateT]) FinalizedRank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalizedRank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// FinalizedState provides a mock function with no fields +func (_m *Forks[StateT]) FinalizedState() *models.State[StateT] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalizedState") + } + + var r0 *models.State[StateT] + if rf, ok := ret.Get(0).(func() *models.State[StateT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.State[StateT]) + } + } + + return r0 +} + +// GetState provides a mock function with given fields: stateID +func (_m *Forks[StateT]) GetState(stateID models.Identity) (*models.State[StateT], bool) { + ret := _m.Called(stateID) + + if len(ret) == 0 { + panic("no return value specified for GetState") + } + + var r0 *models.State[StateT] + var r1 bool + if rf, ok := ret.Get(0).(func(models.Identity) (*models.State[StateT], bool)); ok { + return rf(stateID) + } + if rf, ok := ret.Get(0).(func(models.Identity) *models.State[StateT]); ok { + r0 = rf(stateID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.State[StateT]) + } + } + + if rf, ok := ret.Get(1).(func(models.Identity) bool); ok { + r1 = rf(stateID) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// GetStatesForRank provides a mock function with given fields: rank +func (_m *Forks[StateT]) GetStatesForRank(rank uint64) []*models.State[StateT] { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for GetStatesForRank") + } + + var r0 []*models.State[StateT] + if rf, ok := ret.Get(0).(func(uint64) []*models.State[StateT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.State[StateT]) + } + } + + return r0 +} + +// NewForks creates a new instance of Forks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewForks[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Forks[StateT] { + mock := &Forks[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/leader_provider.go b/consensus/mocks/leader_provider.go new file mode 100644 index 0000000..0a68c54 --- /dev/null +++ b/consensus/mocks/leader_provider.go @@ -0,0 +1,89 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LeaderProvider is an autogenerated mock type for the LeaderProvider type +type LeaderProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique] struct { + mock.Mock +} + +// GetNextLeaders provides a mock function with given fields: ctx, prior +func (_m *LeaderProvider[StateT, PeerIDT, CollectedT]) GetNextLeaders(ctx context.Context, prior *StateT) ([]PeerIDT, error) { + ret := _m.Called(ctx, prior) + + if len(ret) == 0 { + panic("no return value specified for GetNextLeaders") + } + + var r0 []PeerIDT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) ([]PeerIDT, error)); ok { + return rf(ctx, prior) + } + if rf, ok := ret.Get(0).(func(context.Context, *StateT) []PeerIDT); ok { + r0 = rf(ctx, prior) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]PeerIDT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *StateT) error); ok { + r1 = rf(ctx, prior) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProveNextState provides a mock function with given fields: ctx, filter, priorState +func (_m *LeaderProvider[StateT, PeerIDT, CollectedT]) ProveNextState(ctx context.Context, filter []byte, priorState models.Identity) (*StateT, error) { + ret := _m.Called(ctx, filter, priorState) + + if len(ret) == 0 { + panic("no return value specified for ProveNextState") + } + + var r0 *StateT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, models.Identity) (*StateT, error)); ok { + return rf(ctx, filter, priorState) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, models.Identity) *StateT); ok { + r0 = rf(ctx, filter, priorState) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*StateT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, models.Identity) error); ok { + r1 = rf(ctx, filter, priorState) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLeaderProvider creates a new instance of LeaderProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLeaderProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *LeaderProvider[StateT, PeerIDT, CollectedT] { + mock := &LeaderProvider[StateT, PeerIDT, CollectedT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/liveness_provider.go b/consensus/mocks/liveness_provider.go new file mode 100644 index 0000000..5510dd3 --- /dev/null +++ b/consensus/mocks/liveness_provider.go @@ -0,0 +1,77 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// LivenessProvider is an autogenerated mock type for the LivenessProvider type +type LivenessProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique] struct { + mock.Mock +} + +// Collect provides a mock function with given fields: ctx +func (_m *LivenessProvider[StateT, PeerIDT, CollectedT]) Collect(ctx context.Context) (CollectedT, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Collect") + } + + var r0 CollectedT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CollectedT, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) CollectedT); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(CollectedT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendLiveness provides a mock function with given fields: ctx, prior, collected +func (_m *LivenessProvider[StateT, PeerIDT, CollectedT]) SendLiveness(ctx context.Context, prior *StateT, collected CollectedT) error { + ret := _m.Called(ctx, prior, collected) + + if len(ret) == 0 { + panic("no return value specified for SendLiveness") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT, CollectedT) error); ok { + r0 = rf(ctx, prior, collected) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewLivenessProvider creates a new instance of LivenessProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLivenessProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *LivenessProvider[StateT, PeerIDT, CollectedT] { + mock := &LivenessProvider[StateT, PeerIDT, CollectedT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/pacemaker.go b/consensus/mocks/pacemaker.go new file mode 100644 index 0000000..74815e9 --- /dev/null +++ b/consensus/mocks/pacemaker.go @@ -0,0 +1,205 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// Pacemaker is an autogenerated mock type for the Pacemaker type +type Pacemaker struct { + mock.Mock +} + +// CurrentRank provides a mock function with no fields +func (_m *Pacemaker) CurrentRank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CurrentRank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// LatestQuorumCertificate provides a mock function with no fields +func (_m *Pacemaker) LatestQuorumCertificate() models.QuorumCertificate { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestQuorumCertificate") + } + + var r0 models.QuorumCertificate + if rf, ok := ret.Get(0).(func() models.QuorumCertificate); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.QuorumCertificate) + } + } + + return r0 +} + +// PriorRankTimeoutCertificate provides a mock function with no fields +func (_m *Pacemaker) PriorRankTimeoutCertificate() models.TimeoutCertificate { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PriorRankTimeoutCertificate") + } + + var r0 models.TimeoutCertificate + if rf, ok := ret.Get(0).(func() models.TimeoutCertificate); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.TimeoutCertificate) + } + } + + return r0 +} + +// ReceiveQuorumCertificate provides a mock function with given fields: quorumCertificate +func (_m *Pacemaker) ReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) (*models.NextRank, error) { + ret := _m.Called(quorumCertificate) + + if len(ret) == 0 { + panic("no return value specified for ReceiveQuorumCertificate") + } + + var r0 *models.NextRank + var r1 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) (*models.NextRank, error)); ok { + return rf(quorumCertificate) + } + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) *models.NextRank); ok { + r0 = rf(quorumCertificate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.NextRank) + } + } + + if rf, ok := ret.Get(1).(func(models.QuorumCertificate) error); ok { + r1 = rf(quorumCertificate) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReceiveTimeoutCertificate provides a mock function with given fields: timeoutCertificate +func (_m *Pacemaker) ReceiveTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) (*models.NextRank, error) { + ret := _m.Called(timeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for ReceiveTimeoutCertificate") + } + + var r0 *models.NextRank + var r1 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) (*models.NextRank, error)); ok { + return rf(timeoutCertificate) + } + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) *models.NextRank); ok { + r0 = rf(timeoutCertificate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.NextRank) + } + } + + if rf, ok := ret.Get(1).(func(models.TimeoutCertificate) error); ok { + r1 = rf(timeoutCertificate) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: ctx +func (_m *Pacemaker) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TargetPublicationTime provides a mock function with given fields: proposalRank, timeRankEntered, parentStateId +func (_m *Pacemaker) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time { + ret := _m.Called(proposalRank, timeRankEntered, parentStateId) + + if len(ret) == 0 { + panic("no return value specified for TargetPublicationTime") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func(uint64, time.Time, models.Identity) time.Time); ok { + r0 = rf(proposalRank, timeRankEntered, parentStateId) + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// TimeoutCh provides a mock function with no fields +func (_m *Pacemaker) TimeoutCh() <-chan time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TimeoutCh") + } + + var r0 <-chan time.Time + if rf, ok := ret.Get(0).(func() <-chan time.Time); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan time.Time) + } + } + + return r0 +} + +// NewPacemaker creates a new instance of Pacemaker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPacemaker(t interface { + mock.TestingT + Cleanup(func()) +}) *Pacemaker { + mock := &Pacemaker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/packer.go b/consensus/mocks/packer.go new file mode 100644 index 0000000..6c525a6 --- /dev/null +++ b/consensus/mocks/packer.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Packer is an autogenerated mock type for the Packer type +type Packer struct { + mock.Mock +} + +// Pack provides a mock function with given fields: rank, sig +func (_m *Packer) Pack(rank uint64, sig *consensus.StateSignatureData) ([]byte, []byte, error) { + ret := _m.Called(rank, sig) + + if len(ret) == 0 { + panic("no return value specified for Pack") + } + + var r0 []byte + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(uint64, *consensus.StateSignatureData) ([]byte, []byte, error)); ok { + return rf(rank, sig) + } + if rf, ok := ret.Get(0).(func(uint64, *consensus.StateSignatureData) []byte); ok { + r0 = rf(rank, sig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(uint64, *consensus.StateSignatureData) []byte); ok { + r1 = rf(rank, sig) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(uint64, *consensus.StateSignatureData) error); ok { + r2 = rf(rank, sig) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Unpack provides a mock function with given fields: signerIdentities, sigData +func (_m *Packer) Unpack(signerIdentities []models.WeightedIdentity, sigData []byte) (*consensus.StateSignatureData, error) { + ret := _m.Called(signerIdentities, sigData) + + if len(ret) == 0 { + panic("no return value specified for Unpack") + } + + var r0 *consensus.StateSignatureData + var r1 error + if rf, ok := ret.Get(0).(func([]models.WeightedIdentity, []byte) (*consensus.StateSignatureData, error)); ok { + return rf(signerIdentities, sigData) + } + if rf, ok := ret.Get(0).(func([]models.WeightedIdentity, []byte) *consensus.StateSignatureData); ok { + r0 = rf(signerIdentities, sigData) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*consensus.StateSignatureData) + } + } + + if rf, ok := ret.Get(1).(func([]models.WeightedIdentity, []byte) error); ok { + r1 = rf(signerIdentities, sigData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewPacker creates a new instance of Packer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPacker(t interface { + mock.TestingT + Cleanup(func()) +}) *Packer { + mock := &Packer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/participant_consumer.go b/consensus/mocks/participant_consumer.go new file mode 100644 index 0000000..1c81732 --- /dev/null +++ b/consensus/mocks/participant_consumer.go @@ -0,0 +1,91 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" + + time "time" +) + +// ParticipantConsumer is an autogenerated mock type for the ParticipantConsumer type +type ParticipantConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnCurrentRankDetails provides a mock function with given fields: currentRank, finalizedRank, currentLeader +func (_m *ParticipantConsumer[StateT, VoteT]) OnCurrentRankDetails(currentRank uint64, finalizedRank uint64, currentLeader models.Identity) { + _m.Called(currentRank, finalizedRank, currentLeader) +} + +// OnEventProcessed provides a mock function with no fields +func (_m *ParticipantConsumer[StateT, VoteT]) OnEventProcessed() { + _m.Called() +} + +// OnLocalTimeout provides a mock function with given fields: currentRank +func (_m *ParticipantConsumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) { + _m.Called(currentRank) +} + +// OnPartialTimeoutCertificate provides a mock function with given fields: currentRank, partialTimeoutCertificate +func (_m *ParticipantConsumer[StateT, VoteT]) OnPartialTimeoutCertificate(currentRank uint64, partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) { + _m.Called(currentRank, partialTimeoutCertificate) +} + +// OnQuorumCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, qc +func (_m *ParticipantConsumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(oldRank uint64, newRank uint64, qc models.QuorumCertificate) { + _m.Called(oldRank, newRank, qc) +} + +// OnRankChange provides a mock function with given fields: oldRank, newRank +func (_m *ParticipantConsumer[StateT, VoteT]) OnRankChange(oldRank uint64, newRank uint64) { + _m.Called(oldRank, newRank) +} + +// OnReceiveProposal provides a mock function with given fields: currentRank, proposal +func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveProposal(currentRank uint64, proposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(currentRank, proposal) +} + +// OnReceiveQuorumCertificate provides a mock function with given fields: currentRank, qc +func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) { + _m.Called(currentRank, qc) +} + +// OnReceiveTimeoutCertificate provides a mock function with given fields: currentRank, tc +func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) { + _m.Called(currentRank, tc) +} + +// OnStart provides a mock function with given fields: currentRank +func (_m *ParticipantConsumer[StateT, VoteT]) OnStart(currentRank uint64) { + _m.Called(currentRank) +} + +// OnStartingTimeout provides a mock function with given fields: startTime, endTime +func (_m *ParticipantConsumer[StateT, VoteT]) OnStartingTimeout(startTime time.Time, endTime time.Time) { + _m.Called(startTime, endTime) +} + +// OnTimeoutCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, tc +func (_m *ParticipantConsumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(oldRank uint64, newRank uint64, tc models.TimeoutCertificate) { + _m.Called(oldRank, newRank, tc) +} + +// NewParticipantConsumer creates a new instance of ParticipantConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewParticipantConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ParticipantConsumer[StateT, VoteT] { + mock := &ParticipantConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/proposal_duration_provider.go b/consensus/mocks/proposal_duration_provider.go new file mode 100644 index 0000000..35de879 --- /dev/null +++ b/consensus/mocks/proposal_duration_provider.go @@ -0,0 +1,47 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalDurationProvider is an autogenerated mock type for the ProposalDurationProvider type +type ProposalDurationProvider struct { + mock.Mock +} + +// TargetPublicationTime provides a mock function with given fields: proposalRank, timeRankEntered, parentStateId +func (_m *ProposalDurationProvider) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time { + ret := _m.Called(proposalRank, timeRankEntered, parentStateId) + + if len(ret) == 0 { + panic("no return value specified for TargetPublicationTime") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func(uint64, time.Time, models.Identity) time.Time); ok { + r0 = rf(proposalRank, timeRankEntered, parentStateId) + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// NewProposalDurationProvider creates a new instance of ProposalDurationProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProposalDurationProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *ProposalDurationProvider { + mock := &ProposalDurationProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/proposal_violation_consumer.go b/consensus/mocks/proposal_violation_consumer.go new file mode 100644 index 0000000..2cb61e4 --- /dev/null +++ b/consensus/mocks/proposal_violation_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalViolationConsumer is an autogenerated mock type for the ProposalViolationConsumer type +type ProposalViolationConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1 +func (_m *ProposalViolationConsumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) { + _m.Called(_a0, _a1) +} + +// OnInvalidStateDetected provides a mock function with given fields: err +func (_m *ProposalViolationConsumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + _m.Called(err) +} + +// NewProposalViolationConsumer creates a new instance of ProposalViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProposalViolationConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ProposalViolationConsumer[StateT, VoteT] { + mock := &ProposalViolationConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/read_only_consensus_store.go b/consensus/mocks/read_only_consensus_store.go new file mode 100644 index 0000000..7cf8fbd --- /dev/null +++ b/consensus/mocks/read_only_consensus_store.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ReadOnlyConsensusStore is an autogenerated mock type for the ReadOnlyConsensusStore type +type ReadOnlyConsensusStore[VoteT models.Unique] struct { + mock.Mock +} + +// GetConsensusState provides a mock function with no fields +func (_m *ReadOnlyConsensusStore[VoteT]) GetConsensusState() (*models.ConsensusState[VoteT], error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetConsensusState") + } + + var r0 *models.ConsensusState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func() (*models.ConsensusState[VoteT], error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *models.ConsensusState[VoteT]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.ConsensusState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLivenessState provides a mock function with no fields +func (_m *ReadOnlyConsensusStore[VoteT]) GetLivenessState() (*models.LivenessState, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLivenessState") + } + + var r0 *models.LivenessState + var r1 error + if rf, ok := ret.Get(0).(func() (*models.LivenessState, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *models.LivenessState); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.LivenessState) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewReadOnlyConsensusStore creates a new instance of ReadOnlyConsensusStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReadOnlyConsensusStore[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *ReadOnlyConsensusStore[VoteT] { + mock := &ReadOnlyConsensusStore[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/replicas.go b/consensus/mocks/replicas.go new file mode 100644 index 0000000..1daee9d --- /dev/null +++ b/consensus/mocks/replicas.go @@ -0,0 +1,189 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Replicas is an autogenerated mock type for the Replicas type +type Replicas struct { + mock.Mock +} + +// IdentitiesByRank provides a mock function with given fields: rank +func (_m *Replicas) IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for IdentitiesByRank") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) ([]models.WeightedIdentity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) []models.WeightedIdentity); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IdentityByRank provides a mock function with given fields: rank, participantID +func (_m *Replicas) IdentityByRank(rank uint64, participantID models.Identity) (models.WeightedIdentity, error) { + ret := _m.Called(rank, participantID) + + if len(ret) == 0 { + panic("no return value specified for IdentityByRank") + } + + var r0 models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.Identity) (models.WeightedIdentity, error)); ok { + return rf(rank, participantID) + } + if rf, ok := ret.Get(0).(func(uint64, models.Identity) models.WeightedIdentity); ok { + r0 = rf(rank, participantID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.Identity) error); ok { + r1 = rf(rank, participantID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LeaderForRank provides a mock function with given fields: rank +func (_m *Replicas) LeaderForRank(rank uint64) (models.Identity, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for LeaderForRank") + } + + var r0 models.Identity + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (models.Identity, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) models.Identity); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(models.Identity) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QuorumThresholdForRank provides a mock function with given fields: rank +func (_m *Replicas) QuorumThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for QuorumThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Self provides a mock function with no fields +func (_m *Replicas) Self() models.Identity { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Self") + } + + var r0 models.Identity + if rf, ok := ret.Get(0).(func() models.Identity); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(models.Identity) + } + + return r0 +} + +// TimeoutThresholdForRank provides a mock function with given fields: rank +func (_m *Replicas) TimeoutThresholdForRank(rank uint64) (uint64, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for TimeoutThresholdForRank") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(rank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewReplicas creates a new instance of Replicas. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReplicas(t interface { + mock.TestingT + Cleanup(func()) +}) *Replicas { + mock := &Replicas{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/safety_rules.go b/consensus/mocks/safety_rules.go new file mode 100644 index 0000000..04b72b2 --- /dev/null +++ b/consensus/mocks/safety_rules.go @@ -0,0 +1,117 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SafetyRules is an autogenerated mock type for the SafetyRules type +type SafetyRules[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// ProduceTimeout provides a mock function with given fields: curRank, newestQC, lastRankTC +func (_m *SafetyRules[StateT, VoteT]) ProduceTimeout(curRank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) { + ret := _m.Called(curRank, newestQC, lastRankTC) + + if len(ret) == 0 { + panic("no return value specified for ProduceTimeout") + } + + var r0 *models.TimeoutState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok { + return rf(curRank, newestQC, lastRankTC) + } + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok { + r0 = rf(curRank, newestQC, lastRankTC) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.TimeoutState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok { + r1 = rf(curRank, newestQC, lastRankTC) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProduceVote provides a mock function with given fields: proposal, curRank +func (_m *SafetyRules[StateT, VoteT]) ProduceVote(proposal *models.SignedProposal[StateT, VoteT], curRank uint64) (*VoteT, error) { + ret := _m.Called(proposal, curRank) + + if len(ret) == 0 { + panic("no return value specified for ProduceVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT], uint64) (*VoteT, error)); ok { + return rf(proposal, curRank) + } + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT], uint64) *VoteT); ok { + r0 = rf(proposal, curRank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(*models.SignedProposal[StateT, VoteT], uint64) error); ok { + r1 = rf(proposal, curRank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignOwnProposal provides a mock function with given fields: unsignedProposal +func (_m *SafetyRules[StateT, VoteT]) SignOwnProposal(unsignedProposal *models.Proposal[StateT]) (*VoteT, error) { + ret := _m.Called(unsignedProposal) + + if len(ret) == 0 { + panic("no return value specified for SignOwnProposal") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(*models.Proposal[StateT]) (*VoteT, error)); ok { + return rf(unsignedProposal) + } + if rf, ok := ret.Get(0).(func(*models.Proposal[StateT]) *VoteT); ok { + r0 = rf(unsignedProposal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(*models.Proposal[StateT]) error); ok { + r1 = rf(unsignedProposal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSafetyRules creates a new instance of SafetyRules. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSafetyRules[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *SafetyRules[StateT, VoteT] { + mock := &SafetyRules[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/signature_aggregator.go b/consensus/mocks/signature_aggregator.go new file mode 100644 index 0000000..97d4c5f --- /dev/null +++ b/consensus/mocks/signature_aggregator.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SignatureAggregator is an autogenerated mock type for the SignatureAggregator type +type SignatureAggregator struct { + mock.Mock +} + +// Aggregate provides a mock function with given fields: publicKeys, signatures +func (_m *SignatureAggregator) Aggregate(publicKeys [][]byte, signatures [][]byte) (models.AggregatedSignature, error) { + ret := _m.Called(publicKeys, signatures) + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 models.AggregatedSignature + var r1 error + if rf, ok := ret.Get(0).(func([][]byte, [][]byte) (models.AggregatedSignature, error)); ok { + return rf(publicKeys, signatures) + } + if rf, ok := ret.Get(0).(func([][]byte, [][]byte) models.AggregatedSignature); ok { + r0 = rf(publicKeys, signatures) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.AggregatedSignature) + } + } + + if rf, ok := ret.Get(1).(func([][]byte, [][]byte) error); ok { + r1 = rf(publicKeys, signatures) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifySignatureMultiMessage provides a mock function with given fields: publicKeys, signature, messages, context +func (_m *SignatureAggregator) VerifySignatureMultiMessage(publicKeys [][]byte, signature []byte, messages [][]byte, context []byte) bool { + ret := _m.Called(publicKeys, signature, messages, context) + + if len(ret) == 0 { + panic("no return value specified for VerifySignatureMultiMessage") + } + + var r0 bool + if rf, ok := ret.Get(0).(func([][]byte, []byte, [][]byte, []byte) bool); ok { + r0 = rf(publicKeys, signature, messages, context) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// VerifySignatureRaw provides a mock function with given fields: publicKey, signature, message, context +func (_m *SignatureAggregator) VerifySignatureRaw(publicKey []byte, signature []byte, message []byte, context []byte) bool { + ret := _m.Called(publicKey, signature, message, context) + + if len(ret) == 0 { + panic("no return value specified for VerifySignatureRaw") + } + + var r0 bool + if rf, ok := ret.Get(0).(func([]byte, []byte, []byte, []byte) bool); ok { + r0 = rf(publicKey, signature, message, context) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewSignatureAggregator creates a new instance of SignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSignatureAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *SignatureAggregator { + mock := &SignatureAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/signer.go b/consensus/mocks/signer.go new file mode 100644 index 0000000..618a92b --- /dev/null +++ b/consensus/mocks/signer.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Signer is an autogenerated mock type for the Signer type +type Signer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// CreateTimeout provides a mock function with given fields: curView, newestQC, previousRankTimeoutCert +func (_m *Signer[StateT, VoteT]) CreateTimeout(curView uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) { + ret := _m.Called(curView, newestQC, previousRankTimeoutCert) + + if len(ret) == 0 { + panic("no return value specified for CreateTimeout") + } + + var r0 *models.TimeoutState[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok { + return rf(curView, newestQC, previousRankTimeoutCert) + } + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok { + r0 = rf(curView, newestQC, previousRankTimeoutCert) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.TimeoutState[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok { + r1 = rf(curView, newestQC, previousRankTimeoutCert) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateVote provides a mock function with given fields: state +func (_m *Signer[StateT, VoteT]) CreateVote(state *models.State[StateT]) (*VoteT, error) { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for CreateVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(*models.State[StateT]) (*VoteT, error)); ok { + return rf(state) + } + if rf, ok := ret.Get(0).(func(*models.State[StateT]) *VoteT); ok { + r0 = rf(state) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(*models.State[StateT]) error); ok { + r1 = rf(state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSigner creates a new instance of Signer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSigner[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Signer[StateT, VoteT] { + mock := &Signer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/state_producer.go b/consensus/mocks/state_producer.go new file mode 100644 index 0000000..6a396c1 --- /dev/null +++ b/consensus/mocks/state_producer.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateProducer is an autogenerated mock type for the StateProducer type +type StateProducer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// MakeStateProposal provides a mock function with given fields: rank, qc, lastRankTC +func (_m *StateProducer[StateT, VoteT]) MakeStateProposal(rank uint64, qc models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.SignedProposal[StateT, VoteT], error) { + ret := _m.Called(rank, qc, lastRankTC) + + if len(ret) == 0 { + panic("no return value specified for MakeStateProposal") + } + + var r0 *models.SignedProposal[StateT, VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.SignedProposal[StateT, VoteT], error)); ok { + return rf(rank, qc, lastRankTC) + } + if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.SignedProposal[StateT, VoteT]); ok { + r0 = rf(rank, qc, lastRankTC) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.SignedProposal[StateT, VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok { + r1 = rf(rank, qc, lastRankTC) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStateProducer creates a new instance of StateProducer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateProducer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *StateProducer[StateT, VoteT] { + mock := &StateProducer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/state_signer_decoder.go b/consensus/mocks/state_signer_decoder.go new file mode 100644 index 0000000..8690501 --- /dev/null +++ b/consensus/mocks/state_signer_decoder.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateSignerDecoder is an autogenerated mock type for the StateSignerDecoder type +type StateSignerDecoder[StateT models.Unique] struct { + mock.Mock +} + +// DecodeSignerIDs provides a mock function with given fields: state +func (_m *StateSignerDecoder[StateT]) DecodeSignerIDs(state *models.State[StateT]) ([]models.WeightedIdentity, error) { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for DecodeSignerIDs") + } + + var r0 []models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(*models.State[StateT]) ([]models.WeightedIdentity, error)); ok { + return rf(state) + } + if rf, ok := ret.Get(0).(func(*models.State[StateT]) []models.WeightedIdentity); ok { + r0 = rf(state) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(*models.State[StateT]) error); ok { + r1 = rf(state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStateSignerDecoder creates a new instance of StateSignerDecoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateSignerDecoder[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *StateSignerDecoder[StateT] { + mock := &StateSignerDecoder[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/sync_provider.go b/consensus/mocks/sync_provider.go new file mode 100644 index 0000000..4ab1fcb --- /dev/null +++ b/consensus/mocks/sync_provider.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SyncProvider is an autogenerated mock type for the SyncProvider type +type SyncProvider[StateT models.Unique] struct { + mock.Mock +} + +// Synchronize provides a mock function with given fields: ctx, existing +func (_m *SyncProvider[StateT]) Synchronize(ctx context.Context, existing *StateT) (<-chan *StateT, <-chan error) { + ret := _m.Called(ctx, existing) + + if len(ret) == 0 { + panic("no return value specified for Synchronize") + } + + var r0 <-chan *StateT + var r1 <-chan error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) (<-chan *StateT, <-chan error)); ok { + return rf(ctx, existing) + } + if rf, ok := ret.Get(0).(func(context.Context, *StateT) <-chan *StateT); ok { + r0 = rf(ctx, existing) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *StateT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *StateT) <-chan error); ok { + r1 = rf(ctx, existing) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + return r0, r1 +} + +// NewSyncProvider creates a new instance of SyncProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSyncProvider[StateT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *SyncProvider[StateT] { + mock := &SyncProvider[StateT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_aggregation_consumer.go b/consensus/mocks/timeout_aggregation_consumer.go new file mode 100644 index 0000000..50bfa2e --- /dev/null +++ b/consensus/mocks/timeout_aggregation_consumer.go @@ -0,0 +1,62 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregationConsumer is an autogenerated mock type for the TimeoutAggregationConsumer type +type TimeoutAggregationConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleTimeoutDetected provides a mock function with given fields: _a0, _a1 +func (_m *TimeoutAggregationConsumer[VoteT]) OnDoubleTimeoutDetected(_a0 *models.TimeoutState[VoteT], _a1 *models.TimeoutState[VoteT]) { + _m.Called(_a0, _a1) +} + +// OnInvalidTimeoutDetected provides a mock function with given fields: err +func (_m *TimeoutAggregationConsumer[VoteT]) OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) { + _m.Called(err) +} + +// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutAggregationConsumer[VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) { + _m.Called(certificate) +} + +// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutAggregationConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC +func (_m *TimeoutAggregationConsumer[VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) { + _m.Called(rank, newestQC, lastRankTC) +} + +// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate +func (_m *TimeoutAggregationConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnTimeoutProcessed provides a mock function with given fields: timeout +func (_m *TimeoutAggregationConsumer[VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// NewTimeoutAggregationConsumer creates a new instance of TimeoutAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregationConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutAggregationConsumer[VoteT] { + mock := &TimeoutAggregationConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_aggregation_violation_consumer.go b/consensus/mocks/timeout_aggregation_violation_consumer.go new file mode 100644 index 0000000..ce724ac --- /dev/null +++ b/consensus/mocks/timeout_aggregation_violation_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregationViolationConsumer is an autogenerated mock type for the TimeoutAggregationViolationConsumer type +type TimeoutAggregationViolationConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleTimeoutDetected provides a mock function with given fields: _a0, _a1 +func (_m *TimeoutAggregationViolationConsumer[VoteT]) OnDoubleTimeoutDetected(_a0 *models.TimeoutState[VoteT], _a1 *models.TimeoutState[VoteT]) { + _m.Called(_a0, _a1) +} + +// OnInvalidTimeoutDetected provides a mock function with given fields: err +func (_m *TimeoutAggregationViolationConsumer[VoteT]) OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) { + _m.Called(err) +} + +// NewTimeoutAggregationViolationConsumer creates a new instance of TimeoutAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregationViolationConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutAggregationViolationConsumer[VoteT] { + mock := &TimeoutAggregationViolationConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_aggregator.go b/consensus/mocks/timeout_aggregator.go new file mode 100644 index 0000000..b2953ed --- /dev/null +++ b/consensus/mocks/timeout_aggregator.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregator is an autogenerated mock type for the TimeoutAggregator type +type TimeoutAggregator[VoteT models.Unique] struct { + mock.Mock +} + +// AddTimeout provides a mock function with given fields: timeoutState +func (_m *TimeoutAggregator[VoteT]) AddTimeout(timeoutState *models.TimeoutState[VoteT]) { + _m.Called(timeoutState) +} + +// PruneUpToRank provides a mock function with given fields: lowestRetainedRank +func (_m *TimeoutAggregator[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + _m.Called(lowestRetainedRank) +} + +// Start provides a mock function with given fields: ctx +func (_m *TimeoutAggregator[VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTimeoutAggregator creates a new instance of TimeoutAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregator[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutAggregator[VoteT] { + mock := &TimeoutAggregator[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collector.go b/consensus/mocks/timeout_collector.go new file mode 100644 index 0000000..53d84d3 --- /dev/null +++ b/consensus/mocks/timeout_collector.go @@ -0,0 +1,63 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollector is an autogenerated mock type for the TimeoutCollector type +type TimeoutCollector[VoteT models.Unique] struct { + mock.Mock +} + +// AddTimeout provides a mock function with given fields: timeoutState +func (_m *TimeoutCollector[VoteT]) AddTimeout(timeoutState *models.TimeoutState[VoteT]) error { + ret := _m.Called(timeoutState) + + if len(ret) == 0 { + panic("no return value specified for AddTimeout") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.TimeoutState[VoteT]) error); ok { + r0 = rf(timeoutState) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Rank provides a mock function with no fields +func (_m *TimeoutCollector[VoteT]) Rank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Rank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewTimeoutCollector creates a new instance of TimeoutCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollector[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollector[VoteT] { + mock := &TimeoutCollector[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collector_consumer.go b/consensus/mocks/timeout_collector_consumer.go new file mode 100644 index 0000000..07708b3 --- /dev/null +++ b/consensus/mocks/timeout_collector_consumer.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorConsumer is an autogenerated mock type for the TimeoutCollectorConsumer type +type TimeoutCollectorConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutCollectorConsumer[VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) { + _m.Called(certificate) +} + +// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate +func (_m *TimeoutCollectorConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC +func (_m *TimeoutCollectorConsumer[VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) { + _m.Called(rank, newestQC, lastRankTC) +} + +// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate +func (_m *TimeoutCollectorConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) { + _m.Called(certificate) +} + +// OnTimeoutProcessed provides a mock function with given fields: timeout +func (_m *TimeoutCollectorConsumer[VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) { + _m.Called(timeout) +} + +// NewTimeoutCollectorConsumer creates a new instance of TimeoutCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectorConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollectorConsumer[VoteT] { + mock := &TimeoutCollectorConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collector_factory.go b/consensus/mocks/timeout_collector_factory.go new file mode 100644 index 0000000..a6843a4 --- /dev/null +++ b/consensus/mocks/timeout_collector_factory.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorFactory is an autogenerated mock type for the TimeoutCollectorFactory type +type TimeoutCollectorFactory[VoteT models.Unique] struct { + mock.Mock +} + +// Create provides a mock function with given fields: rank +func (_m *TimeoutCollectorFactory[VoteT]) Create(rank uint64) (consensus.TimeoutCollector[VoteT], error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 consensus.TimeoutCollector[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutCollector[VoteT], error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutCollector[VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.TimeoutCollector[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTimeoutCollectorFactory creates a new instance of TimeoutCollectorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectorFactory[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollectorFactory[VoteT] { + mock := &TimeoutCollectorFactory[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_collectors.go b/consensus/mocks/timeout_collectors.go new file mode 100644 index 0000000..cb5189f --- /dev/null +++ b/consensus/mocks/timeout_collectors.go @@ -0,0 +1,71 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectors is an autogenerated mock type for the TimeoutCollectors type +type TimeoutCollectors[VoteT models.Unique] struct { + mock.Mock +} + +// GetOrCreateCollector provides a mock function with given fields: rank +func (_m *TimeoutCollectors[VoteT]) GetOrCreateCollector(rank uint64) (consensus.TimeoutCollector[VoteT], bool, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for GetOrCreateCollector") + } + + var r0 consensus.TimeoutCollector[VoteT] + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutCollector[VoteT], bool, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutCollector[VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.TimeoutCollector[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) bool); ok { + r1 = rf(rank) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(rank) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// PruneUpToRank provides a mock function with given fields: lowestRetainedRank +func (_m *TimeoutCollectors[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + _m.Called(lowestRetainedRank) +} + +// NewTimeoutCollectors creates a new instance of TimeoutCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectors[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutCollectors[VoteT] { + mock := &TimeoutCollectors[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_processor.go b/consensus/mocks/timeout_processor.go new file mode 100644 index 0000000..596ec3f --- /dev/null +++ b/consensus/mocks/timeout_processor.go @@ -0,0 +1,45 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutProcessor is an autogenerated mock type for the TimeoutProcessor type +type TimeoutProcessor[VoteT models.Unique] struct { + mock.Mock +} + +// Process provides a mock function with given fields: timeout +func (_m *TimeoutProcessor[VoteT]) Process(timeout *models.TimeoutState[VoteT]) error { + ret := _m.Called(timeout) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.TimeoutState[VoteT]) error); ok { + r0 = rf(timeout) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTimeoutProcessor creates a new instance of TimeoutProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutProcessor[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutProcessor[VoteT] { + mock := &TimeoutProcessor[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_processor_factory.go b/consensus/mocks/timeout_processor_factory.go new file mode 100644 index 0000000..774b005 --- /dev/null +++ b/consensus/mocks/timeout_processor_factory.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutProcessorFactory is an autogenerated mock type for the TimeoutProcessorFactory type +type TimeoutProcessorFactory[VoteT models.Unique] struct { + mock.Mock +} + +// Create provides a mock function with given fields: rank +func (_m *TimeoutProcessorFactory[VoteT]) Create(rank uint64) (consensus.TimeoutProcessor[VoteT], error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 consensus.TimeoutProcessor[VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutProcessor[VoteT], error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutProcessor[VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.TimeoutProcessor[VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(rank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTimeoutProcessorFactory creates a new instance of TimeoutProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutProcessorFactory[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutProcessorFactory[VoteT] { + mock := &TimeoutProcessorFactory[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/timeout_signature_aggregator.go b/consensus/mocks/timeout_signature_aggregator.go new file mode 100644 index 0000000..aeca0b4 --- /dev/null +++ b/consensus/mocks/timeout_signature_aggregator.go @@ -0,0 +1,132 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutSignatureAggregator is an autogenerated mock type for the TimeoutSignatureAggregator type +type TimeoutSignatureAggregator struct { + mock.Mock +} + +// Aggregate provides a mock function with no fields +func (_m *TimeoutSignatureAggregator) Aggregate() ([]consensus.TimeoutSignerInfo, models.AggregatedSignature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 []consensus.TimeoutSignerInfo + var r1 models.AggregatedSignature + var r2 error + if rf, ok := ret.Get(0).(func() ([]consensus.TimeoutSignerInfo, models.AggregatedSignature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []consensus.TimeoutSignerInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]consensus.TimeoutSignerInfo) + } + } + + if rf, ok := ret.Get(1).(func() models.AggregatedSignature); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(models.AggregatedSignature) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Rank provides a mock function with no fields +func (_m *TimeoutSignatureAggregator) Rank() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Rank") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// TotalWeight provides a mock function with no fields +func (_m *TimeoutSignatureAggregator) TotalWeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TotalWeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// VerifyAndAdd provides a mock function with given fields: signerID, sig, newestQCRank +func (_m *TimeoutSignatureAggregator) VerifyAndAdd(signerID models.Identity, sig []byte, newestQCRank uint64) (uint64, error) { + ret := _m.Called(signerID, sig, newestQCRank) + + if len(ret) == 0 { + panic("no return value specified for VerifyAndAdd") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity, []byte, uint64) (uint64, error)); ok { + return rf(signerID, sig, newestQCRank) + } + if rf, ok := ret.Get(0).(func(models.Identity, []byte, uint64) uint64); ok { + r0 = rf(signerID, sig, newestQCRank) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(models.Identity, []byte, uint64) error); ok { + r1 = rf(signerID, sig, newestQCRank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTimeoutSignatureAggregator creates a new instance of TimeoutSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutSignatureAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *TimeoutSignatureAggregator { + mock := &TimeoutSignatureAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/trace_logger.go b/consensus/mocks/trace_logger.go new file mode 100644 index 0000000..a2b4677 --- /dev/null +++ b/consensus/mocks/trace_logger.go @@ -0,0 +1,34 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// TraceLogger is an autogenerated mock type for the TraceLogger type +type TraceLogger struct { + mock.Mock +} + +// Error provides a mock function with given fields: message, err +func (_m *TraceLogger) Error(message string, err error) { + _m.Called(message, err) +} + +// Trace provides a mock function with given fields: message +func (_m *TraceLogger) Trace(message string) { + _m.Called(message) +} + +// NewTraceLogger creates a new instance of TraceLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTraceLogger(t interface { + mock.TestingT + Cleanup(func()) +}) *TraceLogger { + mock := &TraceLogger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/validator.go b/consensus/mocks/validator.go new file mode 100644 index 0000000..9c0d317 --- /dev/null +++ b/consensus/mocks/validator.go @@ -0,0 +1,111 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Validator is an autogenerated mock type for the Validator type +type Validator[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// ValidateProposal provides a mock function with given fields: proposal +func (_m *Validator[StateT, VoteT]) ValidateProposal(proposal *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for ValidateProposal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateQuorumCertificate provides a mock function with given fields: qc +func (_m *Validator[StateT, VoteT]) ValidateQuorumCertificate(qc models.QuorumCertificate) error { + ret := _m.Called(qc) + + if len(ret) == 0 { + panic("no return value specified for ValidateQuorumCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok { + r0 = rf(qc) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateTimeoutCertificate provides a mock function with given fields: tc +func (_m *Validator[StateT, VoteT]) ValidateTimeoutCertificate(tc models.TimeoutCertificate) error { + ret := _m.Called(tc) + + if len(ret) == 0 { + panic("no return value specified for ValidateTimeoutCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok { + r0 = rf(tc) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateVote provides a mock function with given fields: vote +func (_m *Validator[StateT, VoteT]) ValidateVote(vote *VoteT) (*models.WeightedIdentity, error) { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for ValidateVote") + } + + var r0 *models.WeightedIdentity + var r1 error + if rf, ok := ret.Get(0).(func(*VoteT) (*models.WeightedIdentity, error)); ok { + return rf(vote) + } + if rf, ok := ret.Get(0).(func(*VoteT) *models.WeightedIdentity); ok { + r0 = rf(vote) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func(*VoteT) error); ok { + r1 = rf(vote) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewValidator creates a new instance of Validator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewValidator[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Validator[StateT, VoteT] { + mock := &Validator[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/verifier.go b/consensus/mocks/verifier.go new file mode 100644 index 0000000..14e4d3f --- /dev/null +++ b/consensus/mocks/verifier.go @@ -0,0 +1,81 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Verifier is an autogenerated mock type for the Verifier type +type Verifier[VoteT models.Unique] struct { + mock.Mock +} + +// VerifyQuorumCertificate provides a mock function with given fields: quorumCertificate +func (_m *Verifier[VoteT]) VerifyQuorumCertificate(quorumCertificate models.QuorumCertificate) error { + ret := _m.Called(quorumCertificate) + + if len(ret) == 0 { + panic("no return value specified for VerifyQuorumCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok { + r0 = rf(quorumCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VerifyTimeoutCertificate provides a mock function with given fields: timeoutCertificate +func (_m *Verifier[VoteT]) VerifyTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error { + ret := _m.Called(timeoutCertificate) + + if len(ret) == 0 { + panic("no return value specified for VerifyTimeoutCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok { + r0 = rf(timeoutCertificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VerifyVote provides a mock function with given fields: vote +func (_m *Verifier[VoteT]) VerifyVote(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for VerifyVote") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVerifier creates a new instance of Verifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerifier[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *Verifier[VoteT] { + mock := &Verifier[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/verifying_vote_processor.go b/consensus/mocks/verifying_vote_processor.go new file mode 100644 index 0000000..6e34cc3 --- /dev/null +++ b/consensus/mocks/verifying_vote_processor.go @@ -0,0 +1,85 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VerifyingVoteProcessor is an autogenerated mock type for the VerifyingVoteProcessor type +type VerifyingVoteProcessor[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// Process provides a mock function with given fields: vote +func (_m *VerifyingVoteProcessor[StateT, VoteT]) Process(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with no fields +func (_m *VerifyingVoteProcessor[StateT, VoteT]) State() *StateT { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 *StateT + if rf, ok := ret.Get(0).(func() *StateT); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*StateT) + } + } + + return r0 +} + +// Status provides a mock function with no fields +func (_m *VerifyingVoteProcessor[StateT, VoteT]) Status() consensus.VoteCollectorStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Status") + } + + var r0 consensus.VoteCollectorStatus + if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(consensus.VoteCollectorStatus) + } + + return r0 +} + +// NewVerifyingVoteProcessor creates a new instance of VerifyingVoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerifyingVoteProcessor[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VerifyingVoteProcessor[StateT, VoteT] { + mock := &VerifyingVoteProcessor[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_aggregation_consumer.go b/consensus/mocks/vote_aggregation_consumer.go new file mode 100644 index 0000000..bf582da --- /dev/null +++ b/consensus/mocks/vote_aggregation_consumer.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregationConsumer is an autogenerated mock type for the VoteAggregationConsumer type +type VoteAggregationConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleVotingDetected provides a mock function with given fields: _a0, _a1 +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnDoubleVotingDetected(_a0 *VoteT, _a1 *VoteT) { + _m.Called(_a0, _a1) +} + +// OnInvalidVoteDetected provides a mock function with given fields: err +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) { + _m.Called(err) +} + +// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0 +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) { + _m.Called(_a0) +} + +// OnVoteForInvalidStateDetected provides a mock function with given fields: vote, invalidProposal +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(vote *VoteT, invalidProposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(vote, invalidProposal) +} + +// OnVoteProcessed provides a mock function with given fields: vote +func (_m *VoteAggregationConsumer[StateT, VoteT]) OnVoteProcessed(vote *VoteT) { + _m.Called(vote) +} + +// NewVoteAggregationConsumer creates a new instance of VoteAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregationConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteAggregationConsumer[StateT, VoteT] { + mock := &VoteAggregationConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_aggregation_violation_consumer.go b/consensus/mocks/vote_aggregation_violation_consumer.go new file mode 100644 index 0000000..f7f3c6a --- /dev/null +++ b/consensus/mocks/vote_aggregation_violation_consumer.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregationViolationConsumer is an autogenerated mock type for the VoteAggregationViolationConsumer type +type VoteAggregationViolationConsumer[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// OnDoubleVotingDetected provides a mock function with given fields: _a0, _a1 +func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnDoubleVotingDetected(_a0 *VoteT, _a1 *VoteT) { + _m.Called(_a0, _a1) +} + +// OnInvalidVoteDetected provides a mock function with given fields: err +func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) { + _m.Called(err) +} + +// OnVoteForInvalidStateDetected provides a mock function with given fields: vote, invalidProposal +func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(vote *VoteT, invalidProposal *models.SignedProposal[StateT, VoteT]) { + _m.Called(vote, invalidProposal) +} + +// NewVoteAggregationViolationConsumer creates a new instance of VoteAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregationViolationConsumer[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteAggregationViolationConsumer[StateT, VoteT] { + mock := &VoteAggregationViolationConsumer[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_aggregator.go b/consensus/mocks/vote_aggregator.go new file mode 100644 index 0000000..385d136 --- /dev/null +++ b/consensus/mocks/vote_aggregator.go @@ -0,0 +1,80 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregator is an autogenerated mock type for the VoteAggregator type +type VoteAggregator[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// AddState provides a mock function with given fields: state +func (_m *VoteAggregator[StateT, VoteT]) AddState(state *models.SignedProposal[StateT, VoteT]) { + _m.Called(state) +} + +// AddVote provides a mock function with given fields: vote +func (_m *VoteAggregator[StateT, VoteT]) AddVote(vote *VoteT) { + _m.Called(vote) +} + +// InvalidState provides a mock function with given fields: state +func (_m *VoteAggregator[StateT, VoteT]) InvalidState(state *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for InvalidState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PruneUpToRank provides a mock function with given fields: rank +func (_m *VoteAggregator[StateT, VoteT]) PruneUpToRank(rank uint64) { + _m.Called(rank) +} + +// Start provides a mock function with given fields: ctx +func (_m *VoteAggregator[StateT, VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVoteAggregator creates a new instance of VoteAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregator[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteAggregator[StateT, VoteT] { + mock := &VoteAggregator[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_collector.go b/consensus/mocks/vote_collector.go new file mode 100644 index 0000000..ca72d71 --- /dev/null +++ b/consensus/mocks/vote_collector.go @@ -0,0 +1,106 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollector is an autogenerated mock type for the VoteCollector type +type VoteCollector[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// AddVote provides a mock function with given fields: vote +func (_m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for AddVote") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessState provides a mock function with given fields: state +func (_m *VoteCollector[StateT, VoteT]) ProcessState(state *models.SignedProposal[StateT, VoteT]) error { + ret := _m.Called(state) + + if len(ret) == 0 { + panic("no return value specified for ProcessState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok { + r0 = rf(state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RegisterVoteConsumer provides a mock function with given fields: consumer +func (_m *VoteCollector[StateT, VoteT]) RegisterVoteConsumer(consumer consensus.VoteConsumer[VoteT]) { + _m.Called(consumer) +} + +// Status provides a mock function with no fields +func (_m *VoteCollector[StateT, VoteT]) Status() consensus.VoteCollectorStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Status") + } + + var r0 consensus.VoteCollectorStatus + if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(consensus.VoteCollectorStatus) + } + + return r0 +} + +// View provides a mock function with no fields +func (_m *VoteCollector[StateT, VoteT]) View() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for View") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewVoteCollector creates a new instance of VoteCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollector[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteCollector[StateT, VoteT] { + mock := &VoteCollector[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_collector_consumer.go b/consensus/mocks/vote_collector_consumer.go new file mode 100644 index 0000000..7e0cdc5 --- /dev/null +++ b/consensus/mocks/vote_collector_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectorConsumer is an autogenerated mock type for the VoteCollectorConsumer type +type VoteCollectorConsumer[VoteT models.Unique] struct { + mock.Mock +} + +// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0 +func (_m *VoteCollectorConsumer[VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) { + _m.Called(_a0) +} + +// OnVoteProcessed provides a mock function with given fields: vote +func (_m *VoteCollectorConsumer[VoteT]) OnVoteProcessed(vote *VoteT) { + _m.Called(vote) +} + +// NewVoteCollectorConsumer creates a new instance of VoteCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollectorConsumer[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteCollectorConsumer[VoteT] { + mock := &VoteCollectorConsumer[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_collectors.go b/consensus/mocks/vote_collectors.go new file mode 100644 index 0000000..0717719 --- /dev/null +++ b/consensus/mocks/vote_collectors.go @@ -0,0 +1,92 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + mock "github.com/stretchr/testify/mock" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectors is an autogenerated mock type for the VoteCollectors type +type VoteCollectors[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// GetOrCreateCollector provides a mock function with given fields: rank +func (_m *VoteCollectors[StateT, VoteT]) GetOrCreateCollector(rank uint64) (consensus.VoteCollector[StateT, VoteT], bool, error) { + ret := _m.Called(rank) + + if len(ret) == 0 { + panic("no return value specified for GetOrCreateCollector") + } + + var r0 consensus.VoteCollector[StateT, VoteT] + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (consensus.VoteCollector[StateT, VoteT], bool, error)); ok { + return rf(rank) + } + if rf, ok := ret.Get(0).(func(uint64) consensus.VoteCollector[StateT, VoteT]); ok { + r0 = rf(rank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.VoteCollector[StateT, VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(uint64) bool); ok { + r1 = rf(rank) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(rank) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// PruneUpToRank provides a mock function with given fields: lowestRetainedRank +func (_m *VoteCollectors[StateT, VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + _m.Called(lowestRetainedRank) +} + +// Start provides a mock function with given fields: ctx +func (_m *VoteCollectors[StateT, VoteT]) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVoteCollectors creates a new instance of VoteCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollectors[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteCollectors[StateT, VoteT] { + mock := &VoteCollectors[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_processor.go b/consensus/mocks/vote_processor.go new file mode 100644 index 0000000..757a97e --- /dev/null +++ b/consensus/mocks/vote_processor.go @@ -0,0 +1,65 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteProcessor is an autogenerated mock type for the VoteProcessor type +type VoteProcessor[VoteT models.Unique] struct { + mock.Mock +} + +// Process provides a mock function with given fields: vote +func (_m *VoteProcessor[VoteT]) Process(vote *VoteT) error { + ret := _m.Called(vote) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*VoteT) error); ok { + r0 = rf(vote) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Status provides a mock function with no fields +func (_m *VoteProcessor[VoteT]) Status() consensus.VoteCollectorStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Status") + } + + var r0 consensus.VoteCollectorStatus + if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(consensus.VoteCollectorStatus) + } + + return r0 +} + +// NewVoteProcessor creates a new instance of VoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteProcessor[VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteProcessor[VoteT] { + mock := &VoteProcessor[VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/vote_processor_factory.go b/consensus/mocks/vote_processor_factory.go new file mode 100644 index 0000000..c1cb62f --- /dev/null +++ b/consensus/mocks/vote_processor_factory.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + consensus "source.quilibrium.com/quilibrium/monorepo/consensus" + + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteProcessorFactory is an autogenerated mock type for the VoteProcessorFactory type +type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique] struct { + mock.Mock +} + +// Create provides a mock function with given fields: tracer, proposal +func (_m *VoteProcessorFactory[StateT, VoteT]) Create(tracer consensus.TraceLogger, proposal *models.SignedProposal[StateT, VoteT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + ret := _m.Called(tracer, proposal) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 consensus.VerifyingVoteProcessor[StateT, VoteT] + var r1 error + if rf, ok := ret.Get(0).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error)); ok { + return rf(tracer, proposal) + } + if rf, ok := ret.Get(0).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT]) consensus.VerifyingVoteProcessor[StateT, VoteT]); ok { + r0 = rf(tracer, proposal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(consensus.VerifyingVoteProcessor[StateT, VoteT]) + } + } + + if rf, ok := ret.Get(1).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT]) error); ok { + r1 = rf(tracer, proposal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVoteProcessorFactory creates a new instance of VoteProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteProcessorFactory[StateT models.Unique, VoteT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VoteProcessorFactory[StateT, VoteT] { + mock := &VoteProcessorFactory[StateT, VoteT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/voting_provider.go b/consensus/mocks/voting_provider.go new file mode 100644 index 0000000..ababa49 --- /dev/null +++ b/consensus/mocks/voting_provider.go @@ -0,0 +1,252 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VotingProvider is an autogenerated mock type for the VotingProvider type +type VotingProvider[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique] struct { + mock.Mock +} + +// FinalizeTimeout provides a mock function with given fields: ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeTimeout(ctx context.Context, filter []byte, rank uint64, latestQuorumCertificateRanks []uint64, aggregatedSignature models.AggregatedSignature) (models.TimeoutCertificate, error) { + ret := _m.Called(ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature) + + if len(ret) == 0 { + panic("no return value specified for FinalizeTimeout") + } + + var r0 models.TimeoutCertificate + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint64, []uint64, models.AggregatedSignature) (models.TimeoutCertificate, error)); ok { + return rf(ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint64, []uint64, models.AggregatedSignature) models.TimeoutCertificate); ok { + r0 = rf(ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.TimeoutCertificate) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint64, []uint64, models.AggregatedSignature) error); ok { + r1 = rf(ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalizeVotes provides a mock function with given fields: ctx, proposals, proposalVotes +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeVotes(ctx context.Context, proposals map[models.Identity]*StateT, proposalVotes map[models.Identity]*VoteT) (*StateT, PeerIDT, error) { + ret := _m.Called(ctx, proposals, proposalVotes) + + if len(ret) == 0 { + panic("no return value specified for FinalizeVotes") + } + + var r0 *StateT + var r1 PeerIDT + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) (*StateT, PeerIDT, error)); ok { + return rf(ctx, proposals, proposalVotes) + } + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) *StateT); ok { + r0 = rf(ctx, proposals, proposalVotes) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*StateT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) PeerIDT); ok { + r1 = rf(ctx, proposals, proposalVotes) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(PeerIDT) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, map[models.Identity]*StateT, map[models.Identity]*VoteT) error); ok { + r2 = rf(ctx, proposals, proposalVotes) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// IsQuorum provides a mock function with given fields: ctx, proposalVotes +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) IsQuorum(ctx context.Context, proposalVotes map[models.Identity]*VoteT) (bool, error) { + ret := _m.Called(ctx, proposalVotes) + + if len(ret) == 0 { + panic("no return value specified for IsQuorum") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*VoteT) (bool, error)); ok { + return rf(ctx, proposalVotes) + } + if rf, ok := ret.Get(0).(func(context.Context, map[models.Identity]*VoteT) bool); ok { + r0 = rf(ctx, proposalVotes) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, map[models.Identity]*VoteT) error); ok { + r1 = rf(ctx, proposalVotes) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendConfirmation provides a mock function with given fields: ctx, finalized +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SendConfirmation(ctx context.Context, finalized *StateT) error { + ret := _m.Called(ctx, finalized) + + if len(ret) == 0 { + panic("no return value specified for SendConfirmation") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) error); ok { + r0 = rf(ctx, finalized) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendProposal provides a mock function with given fields: ctx, proposal +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SendProposal(ctx context.Context, proposal *StateT) error { + ret := _m.Called(ctx, proposal) + + if len(ret) == 0 { + panic("no return value specified for SendProposal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *StateT) error); ok { + r0 = rf(ctx, proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendVote provides a mock function with given fields: ctx, vote +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SendVote(ctx context.Context, vote *VoteT) (PeerIDT, error) { + ret := _m.Called(ctx, vote) + + if len(ret) == 0 { + panic("no return value specified for SendVote") + } + + var r0 PeerIDT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *VoteT) (PeerIDT, error)); ok { + return rf(ctx, vote) + } + if rf, ok := ret.Get(0).(func(context.Context, *VoteT) PeerIDT); ok { + r0 = rf(ctx, vote) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(PeerIDT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *VoteT) error); ok { + r1 = rf(ctx, vote) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignTimeoutVote provides a mock function with given fields: ctx, filter, currentRank, newestQuorumCertificateRank +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SignTimeoutVote(ctx context.Context, filter []byte, currentRank uint64, newestQuorumCertificateRank uint64) (*VoteT, error) { + ret := _m.Called(ctx, filter, currentRank, newestQuorumCertificateRank) + + if len(ret) == 0 { + panic("no return value specified for SignTimeoutVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint64, uint64) (*VoteT, error)); ok { + return rf(ctx, filter, currentRank, newestQuorumCertificateRank) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint64, uint64) *VoteT); ok { + r0 = rf(ctx, filter, currentRank, newestQuorumCertificateRank) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint64, uint64) error); ok { + r1 = rf(ctx, filter, currentRank, newestQuorumCertificateRank) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignVote provides a mock function with given fields: ctx, state +func (_m *VotingProvider[StateT, VoteT, PeerIDT]) SignVote(ctx context.Context, state *models.State[StateT]) (*VoteT, error) { + ret := _m.Called(ctx, state) + + if len(ret) == 0 { + panic("no return value specified for SignVote") + } + + var r0 *VoteT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT]) (*VoteT, error)); ok { + return rf(ctx, state) + } + if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT]) *VoteT); ok { + r0 = rf(ctx, state) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VoteT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *models.State[StateT]) error); ok { + r1 = rf(ctx, state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVotingProvider creates a new instance of VotingProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVotingProvider[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique](t interface { + mock.TestingT + Cleanup(func()) +}) *VotingProvider[StateT, VoteT, PeerIDT] { + mock := &VotingProvider[StateT, VoteT, PeerIDT]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/weight_provider.go b/consensus/mocks/weight_provider.go new file mode 100644 index 0000000..5e2440c --- /dev/null +++ b/consensus/mocks/weight_provider.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// WeightProvider is an autogenerated mock type for the WeightProvider type +type WeightProvider struct { + mock.Mock +} + +// GetWeightForBitmask provides a mock function with given fields: filter, bitmask +func (_m *WeightProvider) GetWeightForBitmask(filter []byte, bitmask []byte) uint64 { + ret := _m.Called(filter, bitmask) + + if len(ret) == 0 { + panic("no return value specified for GetWeightForBitmask") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func([]byte, []byte) uint64); ok { + r0 = rf(filter, bitmask) + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewWeightProvider creates a new instance of WeightProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWeightProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *WeightProvider { + mock := &WeightProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/weighted_signature_aggregator.go b/consensus/mocks/weighted_signature_aggregator.go new file mode 100644 index 0000000..bf2920b --- /dev/null +++ b/consensus/mocks/weighted_signature_aggregator.go @@ -0,0 +1,130 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + models "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// WeightedSignatureAggregator is an autogenerated mock type for the WeightedSignatureAggregator type +type WeightedSignatureAggregator struct { + mock.Mock +} + +// Aggregate provides a mock function with no fields +func (_m *WeightedSignatureAggregator) Aggregate() ([]models.WeightedIdentity, models.AggregatedSignature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 []models.WeightedIdentity + var r1 models.AggregatedSignature + var r2 error + if rf, ok := ret.Get(0).(func() ([]models.WeightedIdentity, models.AggregatedSignature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []models.WeightedIdentity); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.WeightedIdentity) + } + } + + if rf, ok := ret.Get(1).(func() models.AggregatedSignature); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(models.AggregatedSignature) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// TotalWeight provides a mock function with no fields +func (_m *WeightedSignatureAggregator) TotalWeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TotalWeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// TrustedAdd provides a mock function with given fields: signerID, sig +func (_m *WeightedSignatureAggregator) TrustedAdd(signerID models.Identity, sig []byte) (uint64, error) { + ret := _m.Called(signerID, sig) + + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(models.Identity, []byte) (uint64, error)); ok { + return rf(signerID, sig) + } + if rf, ok := ret.Get(0).(func(models.Identity, []byte) uint64); ok { + r0 = rf(signerID, sig) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(models.Identity, []byte) error); ok { + r1 = rf(signerID, sig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Verify provides a mock function with given fields: signerID, sig +func (_m *WeightedSignatureAggregator) Verify(signerID models.Identity, sig []byte) error { + ret := _m.Called(signerID, sig) + + if len(ret) == 0 { + panic("no return value specified for Verify") + } + + var r0 error + if rf, ok := ret.Get(0).(func(models.Identity, []byte) error); ok { + r0 = rf(signerID, sig) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewWeightedSignatureAggregator creates a new instance of WeightedSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWeightedSignatureAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *WeightedSignatureAggregator { + mock := &WeightedSignatureAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/workerpool.go b/consensus/mocks/workerpool.go new file mode 100644 index 0000000..447fc39 --- /dev/null +++ b/consensus/mocks/workerpool.go @@ -0,0 +1,34 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Workerpool is an autogenerated mock type for the Workerpool type +type Workerpool struct { + mock.Mock +} + +// StopWait provides a mock function with no fields +func (_m *Workerpool) StopWait() { + _m.Called() +} + +// Submit provides a mock function with given fields: task +func (_m *Workerpool) Submit(task func()) { + _m.Called(task) +} + +// NewWorkerpool creates a new instance of Workerpool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkerpool(t interface { + mock.TestingT + Cleanup(func()) +}) *Workerpool { + mock := &Workerpool{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/mocks/workers.go b/consensus/mocks/workers.go new file mode 100644 index 0000000..3d0a4b9 --- /dev/null +++ b/consensus/mocks/workers.go @@ -0,0 +1,29 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Workers is an autogenerated mock type for the Workers type +type Workers struct { + mock.Mock +} + +// Submit provides a mock function with given fields: task +func (_m *Workers) Submit(task func()) { + _m.Called(task) +} + +// NewWorkers creates a new instance of Workers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkers(t interface { + mock.TestingT + Cleanup(func()) +}) *Workers { + mock := &Workers{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/models/aggregated_signature.go b/consensus/models/aggregated_signature.go new file mode 100644 index 0000000..738d70a --- /dev/null +++ b/consensus/models/aggregated_signature.go @@ -0,0 +1,48 @@ +package models + +// AggregatedSignature provides a generic interface over an aggregatable +// signature type +type AggregatedSignature interface { + // GetSignature returns the aggregated signature in raw canonical bytes + GetSignature() []byte + // GetPublicKey returns the public key in raw canonical bytes + GetPublicKey() []byte + // GetBitmask returns the bitmask of the signers in the signature, in matching + // order to the clique's prover set (in ascending ring order). + GetBitmask() []byte +} + +// AggregatedSigner provides a generic interface over an aggregatable signature +// scheme. Embeds the validation-only methods. +type AggregatedSigner interface { + AggregatedSignatureValidator + // AggregateSignatures produces an AggregatedSignature object, expecting + // public keys and signatures to be in matching order, with nil slices for + // bitmask entries that are not present. The order should be aligned to the + // clique's prover set (in ascending ring order). + AggregateSignatures( + publicKeys [][]byte, + signatures [][]byte, + ) (AggregatedSignature, error) + // SignWithContext produces an AggregatedSignature object, optionally taking + // an existing AggregatedSignature and builds on top of it. + SignWithContext( + aggregatedSignature AggregatedSignature, + bitmaskIndex int, + privateKey []byte, + message []byte, + context []byte, + ) (AggregatedSignature, error) +} + +// AggregatedSignatureValidator provides a generic interface over aggregated +// signature validation. +type AggregatedSignatureValidator interface { + // VerifySignature validates the AggregatedSignature, with a binary pass/fail + // result. + VerifySignature( + aggregatedSignature AggregatedSignature, + message []byte, + context []byte, + ) bool +} diff --git a/consensus/models/consensus_state.go b/consensus/models/consensus_state.go new file mode 100644 index 0000000..50b6d66 --- /dev/null +++ b/consensus/models/consensus_state.go @@ -0,0 +1,15 @@ +package models + +// ConsensusState defines the core minimum data required to maintain consensus +// safety betwixt the core consensus state machine and the deriving users of the +// state machine, different from StateT (the object being built by the user). +type ConsensusState[VoteT Unique] struct { + // The filter scope of the consensus state. + Filter []byte + // The latest rank that has been finalized (e.g. cannot be forked below). + FinalizedRank uint64 + // The latest rank voted on in a quorum certificate or timeout certificate. + LatestAcknowledgedRank uint64 + // The latest timeout data produced by this instance. + LatestTimeout *TimeoutState[VoteT] +} diff --git a/consensus/models/control_flows.go b/consensus/models/control_flows.go new file mode 100644 index 0000000..8bd25ba --- /dev/null +++ b/consensus/models/control_flows.go @@ -0,0 +1,13 @@ +package models + +import "time" + +// NextRank is the control flow event for when the next rank should be entered. +type NextRank struct { + // Rank is the next rank value. + Rank uint64 + // Start is the time the next rank was entered. + Start time.Time + // End is the time the next rank ends (i.e. times out). + End time.Time +} diff --git a/consensus/models/errors.go b/consensus/models/errors.go new file mode 100644 index 0000000..06d363c --- /dev/null +++ b/consensus/models/errors.go @@ -0,0 +1,588 @@ +package models + +import ( + "errors" + "fmt" +) + +var ( + ErrUnverifiableState = errors.New("state proposal can't be verified") + ErrInvalidSignature = errors.New("invalid signature") + ErrRankUnknown = errors.New("rank is unknown") +) + +type NoVoteError struct { + Err error +} + +func (e NoVoteError) Error() string { + return fmt.Sprintf("not voting - %s", e.Err.Error()) +} + +func (e NoVoteError) Unwrap() error { + return e.Err +} + +// IsNoVoteError returns whether an error is NoVoteError +func IsNoVoteError(err error) bool { + var e NoVoteError + return errors.As(err, &e) +} + +func NewNoVoteErrorf(msg string, args ...interface{}) error { + return NoVoteError{Err: fmt.Errorf(msg, args...)} +} + +type NoTimeoutError struct { + Err error +} + +func (e NoTimeoutError) Error() string { + return fmt.Sprintf( + "conditions not satisfied to generate valid TimeoutState: %s", + e.Err.Error(), + ) +} + +func (e NoTimeoutError) Unwrap() error { + return e.Err +} + +func IsNoTimeoutError(err error) bool { + var e NoTimeoutError + return errors.As(err, &e) +} + +func NewNoTimeoutErrorf(msg string, args ...interface{}) error { + return NoTimeoutError{Err: fmt.Errorf(msg, args...)} +} + +type InvalidFormatError struct { + err error +} + +func NewInvalidFormatError(err error) error { + return InvalidFormatError{err} +} + +func NewInvalidFormatErrorf(msg string, args ...interface{}) error { + return InvalidFormatError{fmt.Errorf(msg, args...)} +} + +func (e InvalidFormatError) Error() string { return e.err.Error() } +func (e InvalidFormatError) Unwrap() error { return e.err } + +func IsInvalidFormatError(err error) bool { + var e InvalidFormatError + return errors.As(err, &e) +} + +type ConfigurationError struct { + err error +} + +func NewConfigurationError(err error) error { + return ConfigurationError{err} +} + +func NewConfigurationErrorf(msg string, args ...interface{}) error { + return ConfigurationError{fmt.Errorf(msg, args...)} +} + +func (e ConfigurationError) Error() string { return e.err.Error() } +func (e ConfigurationError) Unwrap() error { return e.err } + +func IsConfigurationError(err error) bool { + var e ConfigurationError + return errors.As(err, &e) +} + +type MissingStateError struct { + Rank uint64 + Identifier Identity +} + +func (e MissingStateError) Error() string { + return fmt.Sprintf( + "missing state at rank %d with ID %v", + e.Rank, + e.Identifier, + ) +} + +func IsMissingStateError(err error) bool { + var e MissingStateError + return errors.As(err, &e) +} + +type InvalidQuorumCertificateError struct { + Identifier Identity + Rank uint64 + Err error +} + +func (e InvalidQuorumCertificateError) Error() string { + return fmt.Sprintf( + "invalid QuorumCertificate for state %x at rank %d: %s", + e.Identifier, + e.Rank, + e.Err.Error(), + ) +} + +func IsInvalidQuorumCertificateError(err error) bool { + var e InvalidQuorumCertificateError + return errors.As(err, &e) +} + +func (e InvalidQuorumCertificateError) Unwrap() error { + return e.Err +} + +type InvalidTimeoutCertificateError struct { + Rank uint64 + Err error +} + +func (e InvalidTimeoutCertificateError) Error() string { + return fmt.Sprintf( + "invalid TimeoutCertificate at rank %d: %s", + e.Rank, + e.Err.Error(), + ) +} + +func IsInvalidTimeoutCertificateError(err error) bool { + var e InvalidTimeoutCertificateError + return errors.As(err, &e) +} + +func (e InvalidTimeoutCertificateError) Unwrap() error { + return e.Err +} + +type InvalidProposalError[StateT Unique, VoteT Unique] struct { + InvalidProposal *SignedProposal[StateT, VoteT] + Err error +} + +func NewInvalidProposalErrorf[StateT Unique, VoteT Unique]( + proposal *SignedProposal[StateT, VoteT], + msg string, + args ...interface{}, +) error { + return InvalidProposalError[StateT, VoteT]{ + InvalidProposal: proposal, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidProposalError[StateT, VoteT]) Error() string { + return fmt.Sprintf( + "invalid proposal %x at rank %d: %s", + e.InvalidProposal.State.Identifier, + e.InvalidProposal.State.Rank, + e.Err.Error(), + ) +} + +func (e InvalidProposalError[StateT, VoteT]) Unwrap() error { + return e.Err +} + +func IsInvalidProposalError[StateT Unique, VoteT Unique](err error) bool { + var e InvalidProposalError[StateT, VoteT] + return errors.As(err, &e) +} + +func AsInvalidProposalError[StateT Unique, VoteT Unique]( + err error, +) (*InvalidProposalError[StateT, VoteT], bool) { + var e InvalidProposalError[StateT, VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +type InvalidStateError[StateT Unique] struct { + InvalidState *State[StateT] + Err error +} + +func NewInvalidStateErrorf[StateT Unique]( + state *State[StateT], + msg string, + args ...interface{}, +) error { + return InvalidStateError[StateT]{ + InvalidState: state, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidStateError[StateT]) Error() string { + return fmt.Sprintf( + "invalid state %x at rank %d: %s", + e.InvalidState.Identifier, + e.InvalidState.Rank, + e.Err.Error(), + ) +} + +func IsInvalidStateError[StateT Unique](err error) bool { + var e InvalidStateError[StateT] + return errors.As(err, &e) +} + +func AsInvalidStateError[StateT Unique](err error) ( + *InvalidStateError[StateT], + bool, +) { + var e InvalidStateError[StateT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e InvalidStateError[StateT]) Unwrap() error { + return e.Err +} + +type InvalidVoteError[VoteT Unique] struct { + Vote *VoteT + Err error +} + +func NewInvalidVoteErrorf[VoteT Unique]( + vote *VoteT, + msg string, + args ...interface{}, +) error { + return InvalidVoteError[VoteT]{ + Vote: vote, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidVoteError[VoteT]) Error() string { + return fmt.Sprintf( + "invalid vote at rank %d for state %x: %s", + (*e.Vote).GetRank(), + (*e.Vote).Identity(), + e.Err.Error(), + ) +} + +func IsInvalidVoteError[VoteT Unique](err error) bool { + var e InvalidVoteError[VoteT] + return errors.As(err, &e) +} + +func AsInvalidVoteError[VoteT Unique](err error) ( + *InvalidVoteError[VoteT], + bool, +) { + var e InvalidVoteError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e InvalidVoteError[VoteT]) Unwrap() error { + return e.Err +} + +type ByzantineThresholdExceededError struct { + Evidence string +} + +func (e ByzantineThresholdExceededError) Error() string { + return e.Evidence +} + +func IsByzantineThresholdExceededError(err error) bool { + var target ByzantineThresholdExceededError + return errors.As(err, &target) +} + +type DoubleVoteError[VoteT Unique] struct { + FirstVote *VoteT + ConflictingVote *VoteT + err error +} + +func (e DoubleVoteError[VoteT]) Error() string { + return e.err.Error() +} + +func IsDoubleVoteError[VoteT Unique](err error) bool { + var e DoubleVoteError[VoteT] + return errors.As(err, &e) +} + +func AsDoubleVoteError[VoteT Unique](err error) ( + *DoubleVoteError[VoteT], + bool, +) { + var e DoubleVoteError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e DoubleVoteError[VoteT]) Unwrap() error { + return e.err +} + +func NewDoubleVoteErrorf[VoteT Unique]( + firstVote, conflictingVote *VoteT, + msg string, + args ...interface{}, +) error { + return DoubleVoteError[VoteT]{ + FirstVote: firstVote, + ConflictingVote: conflictingVote, + err: fmt.Errorf(msg, args...), + } +} + +type DuplicatedSignerError struct { + err error +} + +func NewDuplicatedSignerError(err error) error { + return DuplicatedSignerError{err} +} + +func NewDuplicatedSignerErrorf(msg string, args ...interface{}) error { + return DuplicatedSignerError{err: fmt.Errorf(msg, args...)} +} + +func (e DuplicatedSignerError) Error() string { return e.err.Error() } +func (e DuplicatedSignerError) Unwrap() error { return e.err } + +func IsDuplicatedSignerError(err error) bool { + var e DuplicatedSignerError + return errors.As(err, &e) +} + +type InvalidSignatureIncludedError struct { + err error +} + +func NewInvalidSignatureIncludedError(err error) error { + return InvalidSignatureIncludedError{err} +} + +func NewInvalidSignatureIncludedErrorf(msg string, args ...interface{}) error { + return InvalidSignatureIncludedError{fmt.Errorf(msg, args...)} +} + +func (e InvalidSignatureIncludedError) Error() string { return e.err.Error() } +func (e InvalidSignatureIncludedError) Unwrap() error { return e.err } + +func IsInvalidSignatureIncludedError(err error) bool { + var e InvalidSignatureIncludedError + return errors.As(err, &e) +} + +type InvalidAggregatedKeyError struct { + error +} + +func NewInvalidAggregatedKeyError(err error) error { + return InvalidAggregatedKeyError{err} +} + +func NewInvalidAggregatedKeyErrorf(msg string, args ...interface{}) error { + return InvalidAggregatedKeyError{fmt.Errorf(msg, args...)} +} + +func (e InvalidAggregatedKeyError) Unwrap() error { return e.error } + +func IsInvalidAggregatedKeyError(err error) bool { + var e InvalidAggregatedKeyError + return errors.As(err, &e) +} + +type InsufficientSignaturesError struct { + err error +} + +func NewInsufficientSignaturesError(err error) error { + return InsufficientSignaturesError{err} +} + +func NewInsufficientSignaturesErrorf(msg string, args ...interface{}) error { + return InsufficientSignaturesError{fmt.Errorf(msg, args...)} +} + +func (e InsufficientSignaturesError) Error() string { return e.err.Error() } +func (e InsufficientSignaturesError) Unwrap() error { return e.err } + +func IsInsufficientSignaturesError(err error) bool { + var e InsufficientSignaturesError + return errors.As(err, &e) +} + +type InvalidSignerError struct { + err error +} + +func NewInvalidSignerError(err error) error { + return InvalidSignerError{err} +} + +func NewInvalidSignerErrorf(msg string, args ...interface{}) error { + return InvalidSignerError{fmt.Errorf(msg, args...)} +} + +func (e InvalidSignerError) Error() string { return e.err.Error() } +func (e InvalidSignerError) Unwrap() error { return e.err } + +func IsInvalidSignerError(err error) bool { + var e InvalidSignerError + return errors.As(err, &e) +} + +type DoubleTimeoutError[VoteT Unique] struct { + FirstTimeout *TimeoutState[VoteT] + ConflictingTimeout *TimeoutState[VoteT] + err error +} + +func (e DoubleTimeoutError[VoteT]) Error() string { + return e.err.Error() +} + +func IsDoubleTimeoutError[VoteT Unique](err error) bool { + var e DoubleTimeoutError[VoteT] + return errors.As(err, &e) +} + +func AsDoubleTimeoutError[VoteT Unique](err error) ( + *DoubleTimeoutError[VoteT], + bool, +) { + var e DoubleTimeoutError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e DoubleTimeoutError[VoteT]) Unwrap() error { + return e.err +} + +func NewDoubleTimeoutErrorf[VoteT Unique]( + firstTimeout, conflictingTimeout *TimeoutState[VoteT], + msg string, + args ...interface{}, +) error { + return DoubleTimeoutError[VoteT]{ + FirstTimeout: firstTimeout, + ConflictingTimeout: conflictingTimeout, + err: fmt.Errorf(msg, args...), + } +} + +type InvalidTimeoutError[VoteT Unique] struct { + Timeout *TimeoutState[VoteT] + Err error +} + +func NewInvalidTimeoutErrorf[VoteT Unique]( + timeout *TimeoutState[VoteT], + msg string, + args ...interface{}, +) error { + return InvalidTimeoutError[VoteT]{ + Timeout: timeout, + Err: fmt.Errorf(msg, args...), + } +} + +func (e InvalidTimeoutError[VoteT]) Error() string { + return fmt.Sprintf("invalid timeout: %d: %s", + e.Timeout.Rank, + e.Err.Error(), + ) +} + +func IsInvalidTimeoutError[VoteT Unique](err error) bool { + var e InvalidTimeoutError[VoteT] + return errors.As(err, &e) +} + +func AsInvalidTimeoutError[VoteT Unique](err error) ( + *InvalidTimeoutError[VoteT], + bool, +) { + var e InvalidTimeoutError[VoteT] + ok := errors.As(err, &e) + if ok { + return &e, true + } + return nil, false +} + +func (e InvalidTimeoutError[VoteT]) Unwrap() error { + return e.Err +} + +// UnknownExecutionResultError indicates that the Execution Result is unknown +type UnknownExecutionResultError struct { + err error +} + +func NewUnknownExecutionResultErrorf(msg string, args ...interface{}) error { + return UnknownExecutionResultError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownExecutionResultError) Unwrap() error { + return e.err +} + +func (e UnknownExecutionResultError) Error() string { + return e.err.Error() +} + +func IsUnknownExecutionResultError(err error) bool { + var unknownExecutionResultError UnknownExecutionResultError + return errors.As(err, &unknownExecutionResultError) +} + +type BelowPrunedThresholdError struct { + err error +} + +func NewBelowPrunedThresholdErrorf(msg string, args ...interface{}) error { + return BelowPrunedThresholdError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e BelowPrunedThresholdError) Unwrap() error { + return e.err +} + +func (e BelowPrunedThresholdError) Error() string { + return e.err.Error() +} + +func IsBelowPrunedThresholdError(err error) bool { + var newIsBelowPrunedThresholdError BelowPrunedThresholdError + return errors.As(err, &newIsBelowPrunedThresholdError) +} diff --git a/consensus/models/liveness_state.go b/consensus/models/liveness_state.go new file mode 100644 index 0000000..e7b3495 --- /dev/null +++ b/consensus/models/liveness_state.go @@ -0,0 +1,14 @@ +package models + +// LivenessState defines the core minimum data required to maintain liveness +// of the pacemaker of the consensus state machine. +type LivenessState struct { + // The filter scope of the consensus state. + Filter []byte + // The current rank of the pacemaker. + CurrentRank uint64 + // The latest quorum certificate seen by the pacemaker. + LatestQuorumCertificate QuorumCertificate + // The previous rank's timeout certificate, if applicable. + PriorRankTimeoutCertificate TimeoutCertificate +} diff --git a/consensus/models/proposal.go b/consensus/models/proposal.go new file mode 100644 index 0000000..a21bf86 --- /dev/null +++ b/consensus/models/proposal.go @@ -0,0 +1,45 @@ +package models + +import ( + "errors" +) + +type Proposal[StateT Unique] struct { + State *State[StateT] + PreviousRankTimeoutCertificate TimeoutCertificate +} + +func ProposalFrom[StateT Unique]( + state *State[StateT], + prevTC TimeoutCertificate, +) *Proposal[StateT] { + return &Proposal[StateT]{ + State: state, + PreviousRankTimeoutCertificate: prevTC, + } +} + +type SignedProposal[StateT Unique, VoteT Unique] struct { + Proposal[StateT] + Vote *VoteT +} + +func (p *SignedProposal[StateT, VoteT]) ProposerVote() (*VoteT, error) { + if p.Vote == nil { + return nil, errors.New("missing vote") + } + return p.Vote, nil +} + +func SignedProposalFromState[StateT Unique, VoteT Unique]( + p *Proposal[StateT], + v *VoteT, +) *SignedProposal[StateT, VoteT] { + return &SignedProposal[StateT, VoteT]{ + Proposal: Proposal[StateT]{ + State: p.State, + PreviousRankTimeoutCertificate: p.PreviousRankTimeoutCertificate, + }, + Vote: v, + } +} diff --git a/consensus/models/quorum_certificate.go b/consensus/models/quorum_certificate.go new file mode 100644 index 0000000..59b50ba --- /dev/null +++ b/consensus/models/quorum_certificate.go @@ -0,0 +1,20 @@ +package models + +// QuorumCertificate defines the minimum properties required of a consensus +// clique's validating set of data for a frame. +type QuorumCertificate interface { + // GetFilter returns the applicable filter for the consensus clique. + GetFilter() []byte + // GetRank returns the rank of the consensus loop. + GetRank() uint64 + // GetFrameNumber returns the frame number applied to the round. + GetFrameNumber() uint64 + // GetSelector returns the selector of the frame. + GetSelector() Identity + // GetTimestamp returns the timestamp of the certificate. + GetTimestamp() int64 + // GetAggregatedSignature returns the set of signers who voted on the round. + GetAggregatedSignature() AggregatedSignature + // Equals compares inner equality with another quorum certificate. + Equals(other QuorumCertificate) bool +} diff --git a/consensus/models/state.go b/consensus/models/state.go new file mode 100644 index 0000000..a823ef1 --- /dev/null +++ b/consensus/models/state.go @@ -0,0 +1,101 @@ +package models + +import ( + "fmt" +) + +// State is the HotStuff algorithm's concept of a state, which - in the bigger +// picture - corresponds to the state header. +type State[StateT Unique] struct { + Rank uint64 + Identifier Identity + ProposerID Identity + ParentQuorumCertificate QuorumCertificate + Timestamp uint64 // Unix milliseconds + State *StateT +} + +// StateFrom combines external state with source parent quorum certificate. +func StateFrom[StateT Unique]( + t *StateT, + parentCert QuorumCertificate, +) *State[StateT] { + state := State[StateT]{ + Identifier: (*t).Identity(), + Rank: (*t).GetRank(), + ParentQuorumCertificate: parentCert, + ProposerID: (*t).Source(), + Timestamp: (*t).GetTimestamp(), + State: t, + } + + return &state +} + +// GenesisStateFrom returns a generic consensus model of genesis state. +func GenesisStateFrom[StateT Unique](internal *StateT) *State[StateT] { + genesis := &State[StateT]{ + Identifier: (*internal).Identity(), + Rank: (*internal).GetRank(), + ProposerID: (*internal).Source(), + ParentQuorumCertificate: nil, + Timestamp: (*internal).GetTimestamp(), + State: internal, + } + return genesis +} + +// CertifiedState holds a certified state, which is a state and a +// QuorumCertificate that is pointing to the state. A QuorumCertificate is the +// aggregated form of votes from a supermajority of HotStuff and +// therefore proves validity of the state. A certified state satisfies: +// State.Rank == QuorumCertificate.Rank and +// State.Identifier == QuorumCertificate.Identifier +type CertifiedState[StateT Unique] struct { + State *State[StateT] + CertifyingQuorumCertificate QuorumCertificate +} + +// NewCertifiedState constructs a new certified state. It checks the consistency +// requirements and returns an exception otherwise: +// +// State.Rank == QuorumCertificate.Rank and State.Identifier == +// +// QuorumCertificate.Identifier +func NewCertifiedState[StateT Unique]( + state *State[StateT], + quorumCertificate QuorumCertificate, +) (CertifiedState[StateT], error) { + if state.Rank != quorumCertificate.GetRank() { + return CertifiedState[StateT]{}, + fmt.Errorf( + "state's rank (%d) should equal the qc's rank (%d)", + state.Rank, + quorumCertificate.GetRank(), + ) + } + if state.Identifier != quorumCertificate.GetSelector() { + return CertifiedState[StateT]{}, + fmt.Errorf( + "state's ID (%x) should equal the state referenced by the qc (%x)", + state.Identifier, + quorumCertificate.GetSelector(), + ) + } + return CertifiedState[StateT]{ + State: state, + CertifyingQuorumCertificate: quorumCertificate, + }, nil +} + +// Identifier returns a unique identifier for the state (the ID signed to +// produce a state vote). To avoid repeated computation, we use value from the +// QuorumCertificate. +func (b *CertifiedState[StateT]) Identifier() Identity { + return b.CertifyingQuorumCertificate.GetSelector() +} + +// Rank returns rank where the state was proposed. +func (b *CertifiedState[StateT]) Rank() uint64 { + return b.State.Rank +} diff --git a/consensus/models/timeout_certificate.go b/consensus/models/timeout_certificate.go new file mode 100644 index 0000000..288d886 --- /dev/null +++ b/consensus/models/timeout_certificate.go @@ -0,0 +1,19 @@ +package models + +// TimeoutCertificate defines the minimum properties required of a consensus +// clique's invalidating set of data for a frame. +type TimeoutCertificate interface { + // GetFilter returns the applicable filter for the consensus clique. + GetFilter() []byte + // GetRank returns the rank of the consensus loop. + GetRank() uint64 + // GetLatestRanks returns the latest ranks seen by members of clique, in + // matching order to the clique's prover set (in ascending ring order). + GetLatestRanks() []uint64 + // GetLatestQuorumCert returns the latest quorum certificate accepted. + GetLatestQuorumCert() QuorumCertificate + // GetAggregatedSignature returns the set of signers who voted on the round. + GetAggregatedSignature() AggregatedSignature + // Equals compares inner equality with another timeout certificate. + Equals(other TimeoutCertificate) bool +} diff --git a/consensus/models/timeout_state.go b/consensus/models/timeout_state.go new file mode 100644 index 0000000..c847f4b --- /dev/null +++ b/consensus/models/timeout_state.go @@ -0,0 +1,45 @@ +package models + +import "bytes" + +// TimeoutState represents the stored state change step relevant to the point of +// rank of a given instance of the consensus state machine. +type TimeoutState[VoteT Unique] struct { + // The rank of the timeout data. + Rank uint64 + // The latest quorum certificate seen by the pacemaker. + LatestQuorumCertificate QuorumCertificate + // The previous rank's timeout certificate, if applicable. + PriorRankTimeoutCertificate TimeoutCertificate + // The signed payload which will become part of the new timeout certificate. + Vote *VoteT + // TimeoutTick is the number of times the `timeout.Controller` has + // (re-)emitted the timeout for this rank. When the timer for the rank's + // original duration expires, a `TimeoutState` with `TimeoutTick = 0` is + // broadcast. Subsequently, `timeout.Controller` re-broadcasts the + // `TimeoutState` periodically based on some internal heuristic. Each time + // we attempt a re-broadcast, the `TimeoutTick` is incremented. Incrementing + // the field prevents de-duplicated within the network layer, which in turn + // guarantees quick delivery of the `TimeoutState` after GST and facilitates + // recovery. + TimeoutTick uint64 +} + +func (t *TimeoutState[VoteT]) Equals(other *TimeoutState[VoteT]) bool { + // Shortcut if `t` and `other` point to the same object; covers case where + // both are nil. + if t == other { + return true + } + if t == nil || other == nil { + // only one is nil, the other not (otherwise we would have returned above) + return false + } + + // both are not nil, so we can compare the fields + return t.Rank == other.Rank && + t.LatestQuorumCertificate.Equals(other.LatestQuorumCertificate) && + t.PriorRankTimeoutCertificate.Equals(other.PriorRankTimeoutCertificate) && + (*t.Vote).Source() == (*other.Vote).Source() && + bytes.Equal((*t.Vote).GetSignature(), (*other.Vote).GetSignature()) +} diff --git a/consensus/models/unique.go b/consensus/models/unique.go new file mode 100644 index 0000000..de158c0 --- /dev/null +++ b/consensus/models/unique.go @@ -0,0 +1,26 @@ +package models + +type Identity = string + +// Unique defines important attributes for distinguishing relative basis of +// items. +type Unique interface { + // Identity provides the relevant identity of the given Unique. + Identity() Identity + // Clone should provide a shallow clone of the Unique. + Clone() Unique + // GetRank indicates the ordinal basis of comparison. + GetRank() uint64 + // Source provides the relevant identity of who issued the given Unique. + Source() Identity + // GetTimestamp provides the relevant timestamp of the given Unique. + GetTimestamp() uint64 + // GetSignature provides the signature of the given Unique (if present). + GetSignature() []byte +} + +type WeightedIdentity interface { + PublicKey() []byte + Identity() Identity + Weight() uint64 +} diff --git a/consensus/notifications/pubsub/communicator_distributor.go b/consensus/notifications/pubsub/communicator_distributor.go new file mode 100644 index 0000000..e125486 --- /dev/null +++ b/consensus/notifications/pubsub/communicator_distributor.go @@ -0,0 +1,104 @@ +package pubsub + +import ( + "sync" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// CommunicatorDistributor ingests outbound consensus messages from HotStuff's +// core logic and distributes them to consumers. This logic only runs inside +// active consensus participants proposing state, voting, collecting + +// aggregating votes to QCs, and participating in the pacemaker (sending +// timeouts, collecting + aggregating timeouts to TCs). +// Concurrency safe. +type CommunicatorDistributor[StateT models.Unique, VoteT models.Unique] struct { + consumers []consensus.CommunicatorConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.CommunicatorConsumer[*nilUnique, *nilUnique] = (*CommunicatorDistributor[*nilUnique, *nilUnique])(nil) + +func NewCommunicatorDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *CommunicatorDistributor[StateT, VoteT] { + return &CommunicatorDistributor[StateT, VoteT]{} +} + +func (d *CommunicatorDistributor[StateT, VoteT]) AddCommunicatorConsumer( + consumer consensus.CommunicatorConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func (d *CommunicatorDistributor[StateT, VoteT]) OnOwnVote( + vote *VoteT, + recipientID models.Identity, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, s := range d.consumers { + s.OnOwnVote(vote, recipientID) + } +} + +func (d *CommunicatorDistributor[StateT, VoteT]) OnOwnTimeout( + timeout *models.TimeoutState[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, s := range d.consumers { + s.OnOwnTimeout(timeout) + } +} + +func (d *CommunicatorDistributor[StateT, VoteT]) OnOwnProposal( + proposal *models.SignedProposal[StateT, VoteT], + targetPublicationTime time.Time, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, s := range d.consumers { + s.OnOwnProposal(proposal, targetPublicationTime) + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/notifications/pubsub/distributor.go b/consensus/notifications/pubsub/distributor.go new file mode 100644 index 0000000..322369c --- /dev/null +++ b/consensus/notifications/pubsub/distributor.go @@ -0,0 +1,127 @@ +package pubsub + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Distributor distributes notifications to a list of consumers (event +// consumers). +// +// It allows thread-safe subscription of multiple consumers to events. +type Distributor[StateT models.Unique, VoteT models.Unique] struct { + *FollowerDistributor[StateT, VoteT] + *CommunicatorDistributor[StateT, VoteT] + *ParticipantDistributor[StateT, VoteT] +} + +var _ consensus.Consumer[*nilUnique, *nilUnique] = (*Distributor[*nilUnique, *nilUnique])(nil) + +func NewDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *Distributor[StateT, VoteT] { + return &Distributor[StateT, VoteT]{ + FollowerDistributor: NewFollowerDistributor[StateT, VoteT](), + CommunicatorDistributor: NewCommunicatorDistributor[StateT, VoteT](), + ParticipantDistributor: NewParticipantDistributor[StateT, VoteT](), + } +} + +// AddConsumer adds an event consumer to the Distributor +func (p *Distributor[StateT, VoteT]) AddConsumer( + consumer consensus.Consumer[StateT, VoteT], +) { + p.FollowerDistributor.AddFollowerConsumer(consumer) + p.CommunicatorDistributor.AddCommunicatorConsumer(consumer) + p.ParticipantDistributor.AddParticipantConsumer(consumer) +} + +// FollowerDistributor ingests consensus follower events and distributes it to +// consumers. It allows thread-safe subscription of multiple consumers to +// events. +type FollowerDistributor[StateT models.Unique, VoteT models.Unique] struct { + *ProposalViolationDistributor[StateT, VoteT] + *FinalizationDistributor[StateT] +} + +var _ consensus.FollowerConsumer[*nilUnique, *nilUnique] = (*FollowerDistributor[*nilUnique, *nilUnique])(nil) + +func NewFollowerDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *FollowerDistributor[StateT, VoteT] { + return &FollowerDistributor[StateT, VoteT]{ + ProposalViolationDistributor: NewProposalViolationDistributor[StateT, VoteT](), + FinalizationDistributor: NewFinalizationDistributor[StateT](), + } +} + +// AddFollowerConsumer registers the input `consumer` to be notified on +// `consensus.ConsensusFollowerConsumer` events. +func (d *FollowerDistributor[StateT, VoteT]) AddFollowerConsumer( + consumer consensus.FollowerConsumer[StateT, VoteT], +) { + d.FinalizationDistributor.AddFinalizationConsumer(consumer) + d.ProposalViolationDistributor.AddProposalViolationConsumer(consumer) +} + +// TimeoutAggregationDistributor ingests timeout aggregation events and +// distributes it to consumers. It allows thread-safe subscription of multiple +// consumers to events. +type TimeoutAggregationDistributor[VoteT models.Unique] struct { + *TimeoutAggregationViolationDistributor[VoteT] + *TimeoutCollectorDistributor[VoteT] +} + +var _ consensus.TimeoutAggregationConsumer[*nilUnique] = (*TimeoutAggregationDistributor[*nilUnique])(nil) + +func NewTimeoutAggregationDistributor[ + VoteT models.Unique, +]() *TimeoutAggregationDistributor[VoteT] { + return &TimeoutAggregationDistributor[VoteT]{ + TimeoutAggregationViolationDistributor: NewTimeoutAggregationViolationDistributor[VoteT](), + TimeoutCollectorDistributor: NewTimeoutCollectorDistributor[VoteT](), + } +} + +func (d *TimeoutAggregationDistributor[VoteT]) AddTimeoutAggregationConsumer( + consumer consensus.TimeoutAggregationConsumer[VoteT], +) { + d.TimeoutAggregationViolationDistributor. + AddTimeoutAggregationViolationConsumer(consumer) + d.TimeoutCollectorDistributor.AddTimeoutCollectorConsumer(consumer) +} + +// VoteAggregationDistributor ingests vote aggregation events and distributes it +// to consumers. It allows thread-safe subscription of multiple consumers to +// events. +type VoteAggregationDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + *VoteAggregationViolationDistributor[StateT, VoteT] + *VoteCollectorDistributor[VoteT] +} + +var _ consensus.VoteAggregationConsumer[*nilUnique, *nilUnique] = (*VoteAggregationDistributor[*nilUnique, *nilUnique])(nil) + +func NewVoteAggregationDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *VoteAggregationDistributor[StateT, VoteT] { + return &VoteAggregationDistributor[StateT, VoteT]{ + VoteAggregationViolationDistributor: NewVoteAggregationViolationDistributor[StateT, VoteT](), + VoteCollectorDistributor: NewQCCreatedDistributor[VoteT](), + } +} + +func ( + d *VoteAggregationDistributor[StateT, VoteT], +) AddVoteAggregationConsumer( + consumer consensus.VoteAggregationConsumer[StateT, VoteT], +) { + d.VoteAggregationViolationDistributor. + AddVoteAggregationViolationConsumer(consumer) + d.VoteCollectorDistributor.AddVoteCollectorConsumer(consumer) +} diff --git a/consensus/notifications/pubsub/finalization_distributor.go b/consensus/notifications/pubsub/finalization_distributor.go new file mode 100644 index 0000000..87aaf30 --- /dev/null +++ b/consensus/notifications/pubsub/finalization_distributor.go @@ -0,0 +1,83 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type OnStateFinalizedConsumer[StateT models.Unique] = func( + state *models.State[StateT], +) + +type OnStateIncorporatedConsumer[StateT models.Unique] = func( + state *models.State[StateT], +) + +// FinalizationDistributor ingests events from HotStuff's logic for tracking +// forks + finalization and distributes them to consumers. This logic generally +// runs inside all nodes (irrespectively whether they are active consensus +// participants or or only consensus followers). Concurrency safe. +type FinalizationDistributor[StateT models.Unique] struct { + stateFinalizedConsumers []OnStateFinalizedConsumer[StateT] + stateIncorporatedConsumers []OnStateIncorporatedConsumer[StateT] + consumers []consensus.FinalizationConsumer[StateT] + lock sync.RWMutex +} + +var _ consensus.FinalizationConsumer[*nilUnique] = (*FinalizationDistributor[*nilUnique])(nil) + +func NewFinalizationDistributor[StateT models.Unique]() *FinalizationDistributor[StateT] { + return &FinalizationDistributor[StateT]{} +} + +func (d *FinalizationDistributor[StateT]) AddOnStateFinalizedConsumer( + consumer OnStateFinalizedConsumer[StateT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.stateFinalizedConsumers = append(d.stateFinalizedConsumers, consumer) +} + +func (d *FinalizationDistributor[StateT]) AddOnStateIncorporatedConsumer( + consumer OnStateIncorporatedConsumer[StateT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.stateIncorporatedConsumers = append(d.stateIncorporatedConsumers, consumer) +} + +func (d *FinalizationDistributor[StateT]) AddFinalizationConsumer( + consumer consensus.FinalizationConsumer[StateT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func (d *FinalizationDistributor[StateT]) OnStateIncorporated( + state *models.State[StateT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.stateIncorporatedConsumers { + consumer(state) + } + for _, consumer := range d.consumers { + consumer.OnStateIncorporated(state) + } +} + +func (d *FinalizationDistributor[StateT]) OnFinalizedState( + state *models.State[StateT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.stateFinalizedConsumers { + consumer(state) + } + for _, consumer := range d.consumers { + consumer.OnFinalizedState(state) + } +} diff --git a/consensus/notifications/pubsub/participant_distributor.go b/consensus/notifications/pubsub/participant_distributor.go new file mode 100644 index 0000000..0a7e053 --- /dev/null +++ b/consensus/notifications/pubsub/participant_distributor.go @@ -0,0 +1,181 @@ +package pubsub + +import ( + "sync" + "time" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ParticipantDistributor ingests events from HotStuff's core logic and +// distributes them to consumers. This logic only runs inside active consensus +// participants proposing states, voting, collecting + aggregating votes to QCs, +// and participating in the pacemaker (sending timeouts, collecting + +// aggregating timeouts to TCs). Concurrency safe. +type ParticipantDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + consumers []consensus.ParticipantConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.ParticipantConsumer[*nilUnique, *nilUnique] = (*ParticipantDistributor[*nilUnique, *nilUnique])(nil) + +func NewParticipantDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *ParticipantDistributor[StateT, VoteT] { + return &ParticipantDistributor[StateT, VoteT]{} +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) AddParticipantConsumer( + consumer consensus.ParticipantConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnEventProcessed() { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnEventProcessed() + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnStart(currentView uint64) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnStart(currentView) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnReceiveProposal( + currentView uint64, + proposal *models.SignedProposal[StateT, VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnReceiveProposal(currentView, proposal) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnReceiveQuorumCertificate(currentView uint64, qc models.QuorumCertificate) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnReceiveQuorumCertificate(currentView, qc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnReceiveTimeoutCertificate( + currentView uint64, + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnReceiveTimeoutCertificate(currentView, tc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnPartialTimeoutCertificate( + currentView uint64, + partialTc *consensus.PartialTimeoutCertificateCreated, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnPartialTimeoutCertificate(currentView, partialTc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnLocalTimeout(currentView uint64) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnLocalTimeout(currentView) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnRankChange(oldView, newView uint64) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnRankChange(oldView, newView) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnQuorumCertificateTriggeredRankChange( + oldView uint64, + newView uint64, + qc models.QuorumCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnQuorumCertificateTriggeredRankChange(oldView, newView, qc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnTimeoutCertificateTriggeredRankChange( + oldView uint64, + newView uint64, + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnTimeoutCertificateTriggeredRankChange(oldView, newView, tc) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnStartingTimeout(start time.Time, end time.Time) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnStartingTimeout(start, end) + } +} + +func ( + d *ParticipantDistributor[StateT, VoteT], +) OnCurrentRankDetails( + currentView, finalizedView uint64, + currentLeader models.Identity, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnCurrentRankDetails(currentView, finalizedView, currentLeader) + } +} diff --git a/consensus/notifications/pubsub/proposal_violation_distributor.go b/consensus/notifications/pubsub/proposal_violation_distributor.go new file mode 100644 index 0000000..40b6502 --- /dev/null +++ b/consensus/notifications/pubsub/proposal_violation_distributor.go @@ -0,0 +1,59 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ProposalViolationDistributor ingests notifications about HotStuff-protocol +// violations and distributes them to consumers. Such notifications are produced +// by the active consensus participants and the consensus follower. Concurrently +// safe. +type ProposalViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + consumers []consensus.ProposalViolationConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.ProposalViolationConsumer[*nilUnique, *nilUnique] = (*ProposalViolationDistributor[*nilUnique, *nilUnique])(nil) + +func NewProposalViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *ProposalViolationDistributor[StateT, VoteT] { + return &ProposalViolationDistributor[StateT, VoteT]{} +} + +func ( + d *ProposalViolationDistributor[StateT, VoteT], +) AddProposalViolationConsumer( + consumer consensus.ProposalViolationConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *ProposalViolationDistributor[StateT, VoteT], +) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnInvalidStateDetected(err) + } +} + +func ( + d *ProposalViolationDistributor[StateT, VoteT], +) OnDoubleProposeDetected(state1, state2 *models.State[StateT]) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnDoubleProposeDetected(state1, state2) + } +} diff --git a/consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go b/consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go new file mode 100644 index 0000000..8e7a1a7 --- /dev/null +++ b/consensus/notifications/pubsub/timeout_aggregation_violation_consumer.go @@ -0,0 +1,59 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutAggregationViolationDistributor ingests notifications about timeout +// aggregation violations and distributes them to consumers. Such notifications +// are produced by the timeout aggregation logic. Concurrency safe. +type TimeoutAggregationViolationDistributor[VoteT models.Unique] struct { + consumers []consensus.TimeoutAggregationViolationConsumer[VoteT] + lock sync.RWMutex +} + +var _ consensus.TimeoutAggregationViolationConsumer[*nilUnique] = (*TimeoutAggregationViolationDistributor[*nilUnique])(nil) + +func NewTimeoutAggregationViolationDistributor[ + VoteT models.Unique, +]() *TimeoutAggregationViolationDistributor[VoteT] { + return &TimeoutAggregationViolationDistributor[VoteT]{} +} + +func ( + d *TimeoutAggregationViolationDistributor[VoteT], +) AddTimeoutAggregationViolationConsumer( + consumer consensus.TimeoutAggregationViolationConsumer[VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *TimeoutAggregationViolationDistributor[VoteT], +) OnDoubleTimeoutDetected( + timeout *models.TimeoutState[VoteT], + altTimeout *models.TimeoutState[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnDoubleTimeoutDetected(timeout, altTimeout) + } +} + +func ( + d *TimeoutAggregationViolationDistributor[VoteT], +) OnInvalidTimeoutDetected( + err models.InvalidTimeoutError[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnInvalidTimeoutDetected(err) + } +} diff --git a/consensus/notifications/pubsub/timeout_collector_distributor.go b/consensus/notifications/pubsub/timeout_collector_distributor.go new file mode 100644 index 0000000..3fe319c --- /dev/null +++ b/consensus/notifications/pubsub/timeout_collector_distributor.go @@ -0,0 +1,88 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorDistributor ingests notifications about timeout aggregation +// and distributes them to consumers. Such notifications are produced by the +// timeout aggregation logic. Concurrency safe. +type TimeoutCollectorDistributor[VoteT models.Unique] struct { + lock sync.RWMutex + consumers []consensus.TimeoutCollectorConsumer[VoteT] +} + +var _ consensus.TimeoutCollectorConsumer[*nilUnique] = (*TimeoutCollectorDistributor[*nilUnique])(nil) + +func NewTimeoutCollectorDistributor[VoteT models.Unique]() *TimeoutCollectorDistributor[VoteT] { + return &TimeoutCollectorDistributor[VoteT]{} +} + +func (d *TimeoutCollectorDistributor[VoteT]) AddTimeoutCollectorConsumer( + consumer consensus.TimeoutCollectorConsumer[VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *TimeoutCollectorDistributor[VoteT], +) OnTimeoutCertificateConstructedFromTimeouts( + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnTimeoutCertificateConstructedFromTimeouts(tc) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnPartialTimeoutCertificateCreated( + rank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnPartialTimeoutCertificateCreated( + rank, + newestQC, + previousRankTimeoutCert, + ) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnNewQuorumCertificateDiscovered( + qc models.QuorumCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnNewQuorumCertificateDiscovered(qc) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnNewTimeoutCertificateDiscovered( + tc models.TimeoutCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnNewTimeoutCertificateDiscovered(tc) + } +} + +func (d *TimeoutCollectorDistributor[VoteT]) OnTimeoutProcessed( + timeout *models.TimeoutState[VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnTimeoutProcessed(timeout) + } +} diff --git a/consensus/notifications/pubsub/vote_aggregation_violation_consumer.go b/consensus/notifications/pubsub/vote_aggregation_violation_consumer.go new file mode 100644 index 0000000..bc63dee --- /dev/null +++ b/consensus/notifications/pubsub/vote_aggregation_violation_consumer.go @@ -0,0 +1,75 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregationViolationDistributor ingests notifications about vote +// aggregation violations and distributes them to consumers. Such notifications +// are produced by the vote aggregation logic. Concurrency safe. +type VoteAggregationViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +] struct { + consumers []consensus.VoteAggregationViolationConsumer[StateT, VoteT] + lock sync.RWMutex +} + +var _ consensus.VoteAggregationViolationConsumer[*nilUnique, *nilUnique] = (*VoteAggregationViolationDistributor[*nilUnique, *nilUnique])(nil) + +func NewVoteAggregationViolationDistributor[ + StateT models.Unique, + VoteT models.Unique, +]() *VoteAggregationViolationDistributor[StateT, VoteT] { + return &VoteAggregationViolationDistributor[StateT, VoteT]{} +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) AddVoteAggregationViolationConsumer( + consumer consensus.VoteAggregationViolationConsumer[StateT, VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) OnDoubleVotingDetected(vote1, vote2 *VoteT) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnDoubleVotingDetected(vote1, vote2) + } +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnInvalidVoteDetected(err) + } +} + +func (d *VoteAggregationViolationDistributor[ + StateT, + VoteT, +]) OnVoteForInvalidStateDetected( + vote *VoteT, + invalidProposal *models.SignedProposal[StateT, VoteT], +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnVoteForInvalidStateDetected(vote, invalidProposal) + } +} diff --git a/consensus/notifications/pubsub/vote_collector_distributor.go b/consensus/notifications/pubsub/vote_collector_distributor.go new file mode 100644 index 0000000..58f3ad9 --- /dev/null +++ b/consensus/notifications/pubsub/vote_collector_distributor.go @@ -0,0 +1,52 @@ +package pubsub + +import ( + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectorDistributor ingests notifications about vote aggregation and +// distributes them to consumers. Such notifications are produced by the vote aggregation logic. +// Concurrency safe. +type VoteCollectorDistributor[VoteT models.Unique] struct { + consumers []consensus.VoteCollectorConsumer[VoteT] + lock sync.RWMutex +} + +var _ consensus.VoteCollectorConsumer[*nilUnique] = (*VoteCollectorDistributor[*nilUnique])(nil) + +func NewQCCreatedDistributor[ + VoteT models.Unique, +]() *VoteCollectorDistributor[VoteT] { + return &VoteCollectorDistributor[VoteT]{} +} + +func (d *VoteCollectorDistributor[VoteT]) AddVoteCollectorConsumer( + consumer consensus.VoteCollectorConsumer[VoteT], +) { + d.lock.Lock() + defer d.lock.Unlock() + d.consumers = append(d.consumers, consumer) +} + +func ( + d *VoteCollectorDistributor[VoteT], +) OnQuorumCertificateConstructedFromVotes( + qc models.QuorumCertificate, +) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, consumer := range d.consumers { + consumer.OnQuorumCertificateConstructedFromVotes(qc) + } +} + +func (d *VoteCollectorDistributor[VoteT]) OnVoteProcessed(vote *VoteT) { + d.lock.RLock() + defer d.lock.RUnlock() + for _, subscriber := range d.consumers { + subscriber.OnVoteProcessed(vote) + } +} diff --git a/consensus/pacemaker/pacemaker.go b/consensus/pacemaker/pacemaker.go new file mode 100644 index 0000000..5ead574 --- /dev/null +++ b/consensus/pacemaker/pacemaker.go @@ -0,0 +1,314 @@ +package pacemaker + +import ( + "context" + "time" + + "github.com/pkg/errors" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +type Pacemaker[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] struct { + ctx context.Context + started bool + proposalDurationProvider consensus.ProposalDurationProvider + notifier consensus.Consumer[StateT, VoteT] + store consensus.ConsensusStore[VoteT] + backoffTimer *consensus.BackoffTimer + traceLogger consensus.TraceLogger + livenessState *models.LivenessState +} + +func NewPacemaker[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + initialParameters func() *models.LivenessState, + proposalDurationProvider consensus.ProposalDurationProvider, + notifier consensus.Consumer[StateT, VoteT], + store consensus.ConsensusStore[VoteT], + traceLogger consensus.TraceLogger, +) (*Pacemaker[StateT, VoteT, PeerIDT, CollectedT], error) { + livenessState, err := store.GetLivenessState() + if err != nil { + livenessState = initialParameters() + } + + return &Pacemaker[StateT, VoteT, PeerIDT, CollectedT]{ + proposalDurationProvider: proposalDurationProvider, + notifier: notifier, + store: store, + traceLogger: traceLogger, + livenessState: livenessState, + started: false, + }, nil +} + +// CurrentRank implements consensus.PacemakerProvider. +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) CurrentRank() uint64 { + return p.livenessState.CurrentRank +} + +// LatestQuorumCertificate implements consensus.PacemakerProvider. +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) LatestQuorumCertificate() models.QuorumCertificate { + return p.livenessState.LatestQuorumCertificate +} + +// PriorRankTimeoutCertificate implements consensus.PacemakerProvider. +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) PriorRankTimeoutCertificate() models.TimeoutCertificate { + return p.livenessState.PriorRankTimeoutCertificate +} + +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) newRankAndTimeout( + currentRank uint64, + newRank uint64, +) (*models.NextRank, error) { + p.notifier.OnRankChange(currentRank, newRank) + start, end := p.backoffTimer.Start(p.ctx) + p.notifier.OnStartingTimeout(start, end) + + return &models.NextRank{ + Rank: newRank, + Start: start, + End: end, + }, nil +} + +// ReceiveQuorumCertificate implements consensus.PacemakerProvider. +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) ReceiveQuorumCertificate( + quorumCertificate models.QuorumCertificate, +) (*models.NextRank, error) { + currentRank := p.livenessState.CurrentRank + newRank, err := p.processQuorumCertificate(quorumCertificate) + if err != nil { + return nil, errors.Wrap(err, "receive quorum certificate") + } + + p.backoffTimer.ReceiveSuccess() + p.notifier.OnQuorumCertificateTriggeredRankChange( + currentRank, + newRank, + quorumCertificate, + ) + + return p.newRankAndTimeout(currentRank, newRank) +} + +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) processQuorumCertificate( + quorumCertificate models.QuorumCertificate, +) (uint64, error) { + currentRank := p.livenessState.CurrentRank + if quorumCertificate.GetRank() < currentRank { + if p.livenessState.LatestQuorumCertificate.GetRank() >= + quorumCertificate.GetRank() { + return currentRank, nil + } + + p.livenessState.LatestQuorumCertificate = quorumCertificate + err := p.store.PutLivenessState(p.livenessState) + if err != nil { + return currentRank, errors.Wrap(err, "process quorum certificate") + } + + return currentRank, nil + } + + newRank := quorumCertificate.GetRank() + 1 + p.livenessState.CurrentRank = newRank + p.livenessState.LatestQuorumCertificate = quorumCertificate + p.livenessState.PriorRankTimeoutCertificate = nil + err := p.store.PutLivenessState(p.livenessState) + if err != nil { + return 0, errors.Wrap(err, "process quorum certificate") + } + + return newRank, nil +} + +// ReceiveTimeoutCertificate implements consensus.PacemakerProvider. +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) ReceiveTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, +) (*models.NextRank, error) { + currentRank := p.livenessState.CurrentRank + newRank, err := p.processTimeoutCertificate(timeoutCertificate) + if err != nil { + return nil, errors.Wrap(err, "receive timeout certificate") + } + if newRank <= currentRank { + return nil, nil + } + + p.backoffTimer.ReceiveTimeout() + p.notifier.OnTimeoutCertificateTriggeredRankChange( + currentRank, + newRank, + timeoutCertificate, + ) + + return p.newRankAndTimeout(currentRank, newRank) +} + +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) processTimeoutCertificate( + timeoutCertificate models.TimeoutCertificate, +) (uint64, error) { + currentRank := p.livenessState.CurrentRank + if timeoutCertificate == nil { + return currentRank, nil + } + + if timeoutCertificate.GetRank() < currentRank { + if p.livenessState.LatestQuorumCertificate.GetRank() >= + timeoutCertificate.GetLatestQuorumCert().GetRank() { + return currentRank, nil + } + + p.livenessState.LatestQuorumCertificate = timeoutCertificate. + GetLatestQuorumCert() + err := p.store.PutLivenessState(p.livenessState) + if err != nil { + return currentRank, errors.Wrap(err, "process timeout certificate") + } + + return currentRank, nil + } + + newRank := timeoutCertificate.GetRank() + 1 + p.livenessState.CurrentRank = newRank + p.livenessState.LatestQuorumCertificate = timeoutCertificate. + GetLatestQuorumCert() + p.livenessState.PriorRankTimeoutCertificate = timeoutCertificate + err := p.store.PutLivenessState(p.livenessState) + if err != nil { + return 0, errors.Wrap(err, "process timeout certificate") + } + + return newRank, nil +} + +// TimeoutCh implements consensus.PacemakerProvider. +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) TimeoutCh() <-chan time.Time { + return p.backoffTimer.TimeoutCh() +} + +func (p *Pacemaker[ + StateT, + VoteT, + PeerIDT, + CollectedT, +]) Start(ctx context.Context) error { + if p.started { + return nil + } + p.started = true + p.ctx = ctx + start, end := p.backoffTimer.Start(ctx) + p.notifier.OnStartingTimeout(start, end) + return nil +} + +func (p *Pacemaker[StateT, VoteT, PeerIDT, CollectedT]) TargetPublicationTime( + proposalRank uint64, + timeRankEntered time.Time, + parentStateId models.Identity, +) time.Time { + return p.proposalDurationProvider.TargetPublicationTime( + proposalRank, + timeRankEntered, + parentStateId, + ) +} + +var _ consensus.Pacemaker = (*Pacemaker[ + *nilUnique, + *nilUnique, + *nilUnique, + *nilUnique, +])(nil) + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/safetyrules/safety_rules.go b/consensus/safetyrules/safety_rules.go new file mode 100644 index 0000000..3fc22e2 --- /dev/null +++ b/consensus/safetyrules/safety_rules.go @@ -0,0 +1,549 @@ +package safetyrules + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// SafetyRules is a dedicated module that enforces consensus safety. This +// component has the sole authority to generate votes and timeouts. It follows +// voting and timeout rules for creating votes and timeouts respectively. +// Caller can be sure that created vote or timeout doesn't break safety and can +// be used in consensus process. SafetyRules relies on consensus.ConsensusStore +// to store latest state of consensus.SafetyData. +// +// The voting rules implemented by SafetyRules are: +// 1. Replicas vote in strictly increasing ranks. At most one vote can be +// signed per rank. Caution: The leader's state signature is formally a vote +// for their own proposal. +// 2. Each state has to include a TC or a QC from the previous rank. +// a. [Happy path] If the previous rank resulted in a QC then the proposer +// should include it in their state. +// b. [Recovery path] If the previous round did *not* result in a QC, the +// leader of the subsequent round *must* include a valid TC for the +// previous rank in its state. +// +// Condition 1 guarantees a foundational security theorem for HotStuff (incl. +// the DiemBFT / Jolteon variant): +// +// THEOREM: For each rank, there can be at most 1 certified state. +// +// NOT safe for concurrent use. +type SafetyRules[StateT models.Unique, VoteT models.Unique] struct { + signer consensus.Signer[StateT, VoteT] + store consensus.ConsensusStore[VoteT] + committee consensus.DynamicCommittee // only produce votes when we are valid committee members + consensusState *models.ConsensusState[VoteT] +} + +var _ consensus.SafetyRules[*nilUnique, *nilUnique] = (*SafetyRules[*nilUnique, *nilUnique])(nil) + +// NewSafetyRules creates a new SafetyRules instance +func NewSafetyRules[StateT models.Unique, VoteT models.Unique]( + signer consensus.Signer[StateT, VoteT], + store consensus.ConsensusStore[VoteT], + committee consensus.DynamicCommittee, +) (*SafetyRules[StateT, VoteT], error) { + // get the last stored safety data + consensusState, err := store.GetConsensusState() + if err != nil { + return nil, fmt.Errorf("could not load safety data: %w", err) + } + return &SafetyRules[StateT, VoteT]{ + signer: signer, + store: store, + committee: committee, + consensusState: consensusState, + }, nil +} + +// ProduceVote will make a decision on whether it will vote for the given +// proposal, the returned error indicates whether to vote or not. To ensure +// that only safe proposals are being voted on, we check that the proposer is a +// valid committee member and that the proposal complies with voting rules. +// We expect that only well-formed proposals with valid signatures are submitted +// for voting. The curRank is taken as input to ensure SafetyRules will only +// vote for proposals at current rank and prevent double voting. +// Returns: +// - (vote, nil): On the _first_ state for the current rank that is safe to +// vote for. Subsequently, voter does _not_ vote for any other state with +// the same (or lower) rank. +// - (nil, models.NoVoteError): If the voter decides that it does not want to +// vote for the given state. This is a sentinel error and _expected_ during +// normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) ProduceVote( + signedProposal *models.SignedProposal[StateT, VoteT], + curRank uint64, +) (*VoteT, error) { + return r.produceVote(&signedProposal.Proposal, curRank) +} + +// produceVote implements the core Safety Rules to validate whether it is safe +// to vote. This method is to be used to vote for other leaders' states as well +// as this node's own proposals under construction. We explicitly codify the +// important aspect that a proposer's signature for their own state is +// conceptually also just a vote (we explicitly use that property when +// aggregating votes and including the proposer's own vote into a QC). In order +// to express this conceptual equivalence in code, the voting logic in Safety +// Rules must also operate on an unsigned Proposal. +// +// The curRank is taken as input to ensure SafetyRules will only vote for +// proposals at current rank and prevent double voting. +// Returns: +// - (vote, nil): On the _first_ state for the current rank that is safe to +// vote for. Subsequently, voter does _not_ vote for any other state with +// the same (or lower) rank. +// - (nil, models.NoVoteError): If the voter decides that it does not want to +// vote for the given state. This is a sentinel error and _expected_ during +// normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) produceVote( + proposal *models.Proposal[StateT], + curRank uint64, +) (*VoteT, error) { + state := proposal.State + // sanity checks: + if curRank != state.Rank { + return nil, fmt.Errorf( + "expecting state for current rank %d, but state's rank is %d", + curRank, + state.Rank, + ) + } + + err := r.isSafeToVote(proposal) + if err != nil { + return nil, fmt.Errorf( + "not safe to vote for proposal %x: %w", + proposal.State.Identifier, + err, + ) + } + + currentLeader, err := r.committee.LeaderForRank(state.Rank) + if err != nil { + return nil, fmt.Errorf( + "expect to have a valid leader for rank %d: %w", + curRank, + err, + ) + } + // This sanity check confirms that the proposal is from the correct leader of + // this rank. In case this sanity check fails, we return an exception, because + // the compliance layer should have verified this already. However, proposals + // from this node might not go through the compliance engine, and must be + // signed before anyway. Therefore, we still include this sanity check, but + // return an exception because signing a proposal should be only for ranks + // where this node is actually the leader. + if state.ProposerID != currentLeader { + return nil, fmt.Errorf( + "incorrect proposal, as proposer %x is different from the leader %x for rank %d", + state.ProposerID, + currentLeader, + curRank, + ) + } + + // In case this node is the leader, we can skip the following checks. + // • If this node is ejected (check (ii) would fail), voting for any states or + // signing own proposals is of no harm. This is because all other honest + // nodes should have terminated their connection to us, so we are not + // risking to use up the networking bandwidth of honest nodes. This is + // relevant in case of self-ejection: a node operator suspecting their + // node's keys to be compromised can request for their node to be ejected to + // prevent malicious actors impersonating their node, launching an attack on + // the network, and the seniority being slashed. The self-ejection mechanism + // corresponds to key-revocation and reduces attack surface for the network + // and the node operator's seniority. In case of self-ejection, a node is no + // longer part of the network, hence it cannot harm the network and is no + // longer subject to slashing for actions during the respective ranks. + // Therefore, voting or continuing to signing state proposals is of no + // concern. + // • In case this node is the leader, `state.ProposerID` and + // `r.committee.Self()` are identical. In other words, check (i) also + // verifies that this node itself is not ejected -- the same as check (ii). + // Hence, also check (i) can be skipped with the same reasoning. + if currentLeader != r.committee.Self() { + // (i): we need to make sure that proposer is not ejected to vote + _, err = r.committee.IdentityByState(state.Identifier, state.ProposerID) + if models.IsInvalidSignerError(err) { + // the proposer must be ejected since the proposal has already been + // validated, which ensures that the proposer was a valid committee member + // at the start of the epoch + return nil, models.NewNoVoteErrorf("proposer ejected: %w", err) + } + if err != nil { + return nil, fmt.Errorf( + "internal error retrieving Identity of proposer %x at state %x: %w", + state.ProposerID, + state.Identifier, + err, + ) + } + + // (ii) Do not produce a vote for states where we are not an active + // committee member. The HotStuff state machine may request to vote during + // grace periods outside the epochs, where the node is authorized to + // actively participate. If we voted during those grace periods, we would + // needlessly waste network bandwidth, as such votes can't be used to + // produce valid QCs. + _, err = r.committee.IdentityByState(state.Identifier, r.committee.Self()) + if models.IsInvalidSignerError(err) { + return nil, models.NewNoVoteErrorf( + "I am not authorized to vote for state %x: %w", + state.Identifier, + err, + ) + } + if err != nil { + return nil, fmt.Errorf("could not get self identity: %w", err) + } + } + + vote, err := r.signer.CreateVote(state.State) + if err != nil { + return nil, fmt.Errorf("could not vote for state: %w", err) + } + + // vote for the current rank has been produced, update safetyData + r.consensusState.LatestAcknowledgedRank = curRank + if r.consensusState.FinalizedRank < state.ParentQuorumCertificate.GetRank() { + r.consensusState.FinalizedRank = state.ParentQuorumCertificate.GetRank() + } + + err = r.store.PutConsensusState(r.consensusState) + if err != nil { + return nil, fmt.Errorf("could not persist safety data: %w", err) + } + + return &vote, nil +} + +// ProduceTimeout takes current rank, highest locally known QC and TC (optional, +// must be nil if and only if QC is for previous rank) and decides whether to +// produce timeout for current rank. +// Returns: +// - (timeout, nil): It is safe to timeout for current rank using newestQC and +// previousRankTimeoutCert. +// - (nil, models.NoTimeoutError): If replica is not part of the authorized +// consensus committee (anymore) and therefore is not authorized to produce +// a valid timeout state. This sentinel error is _expected_ during normal +// operation, e.g. during the grace-period after Epoch switchover or after +// the replica self-ejected. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) ProduceTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) (*models.TimeoutState[VoteT], error) { + lastTimeout := r.consensusState.LatestTimeout + if lastTimeout != nil && lastTimeout.Rank == curRank { + updatedTimeout := &models.TimeoutState[VoteT]{ + Rank: lastTimeout.Rank, + LatestQuorumCertificate: lastTimeout.LatestQuorumCertificate, + PriorRankTimeoutCertificate: lastTimeout.PriorRankTimeoutCertificate, + TimeoutTick: lastTimeout.TimeoutTick + 1, + } + + // persist updated TimeoutState in `safetyData` and return it + r.consensusState.LatestTimeout = updatedTimeout + err := r.store.PutConsensusState(r.consensusState) + if err != nil { + return nil, fmt.Errorf("could not persist safety data: %w", err) + } + return r.consensusState.LatestTimeout, nil + } + + err := r.IsSafeToTimeout(curRank, newestQC, previousRankTimeoutCert) + if err != nil { + return nil, fmt.Errorf("local, trusted inputs failed safety rules: %w", err) + } + + // Do not produce a timeout for rank where we are not a valid committee + // member. + _, err = r.committee.IdentityByRank(curRank, r.committee.Self()) + if err != nil { + if models.IsInvalidSignerError(err) { + return nil, models.NewNoTimeoutErrorf( + "I am not authorized to timeout for rank %d: %w", + curRank, + err, + ) + } + return nil, fmt.Errorf("could not get self identity: %w", err) + } + + timeout, err := r.signer.CreateTimeout( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + return nil, fmt.Errorf( + "could not create timeout at rank %d: %w", + curRank, + err, + ) + } + + r.consensusState.LatestAcknowledgedRank = curRank + r.consensusState.LatestTimeout = timeout + + err = r.store.PutConsensusState(r.consensusState) + if err != nil { + return nil, fmt.Errorf("could not persist safety data: %w", err) + } + + return timeout, nil +} + +// SignOwnProposal takes an unsigned state proposal and produces a vote for it. +// Vote is a cryptographic commitment to the proposal. By adding the vote to an +// unsigned proposal, the caller constructs a signed state proposal. This method +// has to be used only by the leader, which must be the proposer of the state +// (or an exception is returned). Implementors must guarantee that: +// - vote on the proposal satisfies safety rules +// - maximum one proposal is signed per rank +// Returns: +// - (vote, nil): the passed unsigned proposal is a valid one, and it's safe +// to make a proposal. Subsequently, leader does _not_ produce any _other_ +// proposal with the same (or lower) rank. +// - (nil, models.NoVoteError): according to HotStuff's Safety Rules, it is +// not safe to sign the given proposal. This could happen because we have +// already proposed or timed out for the given rank. This is a sentinel +// error and _expected_ during normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge +// cases or corrupted internal state (fatal). +func (r *SafetyRules[StateT, VoteT]) SignOwnProposal( + unsignedProposal *models.Proposal[StateT], +) (*VoteT, error) { + // check that the state is created by us + if unsignedProposal.State.ProposerID != r.committee.Self() { + return nil, fmt.Errorf("can't sign proposal for someone else's state") + } + + return r.produceVote(unsignedProposal, unsignedProposal.State.Rank) +} + +// isSafeToVote checks if this proposal is valid in terms of voting rules, if +// voting for this proposal won't break safety rules. Expected errors during +// normal operations: +// - NoVoteError if replica already acted during this rank (either voted o +// generated timeout) +func (r *SafetyRules[StateT, VoteT]) isSafeToVote( + proposal *models.Proposal[StateT], +) error { + stateRank := proposal.State.Rank + + err := r.validateEvidenceForEnteringRank( + stateRank, + proposal.State.ParentQuorumCertificate, + proposal.PreviousRankTimeoutCertificate, + ) + if err != nil { + // As we are expecting the states to be pre-validated, any failure here is a + // symptom of an internal bug. + return fmt.Errorf("proposal failed consensus validity check: %w", err) + } + + // This check satisfies voting rule 1 + // 1. Replicas vote strictly in increasing rounds, + // state's rank must be greater than the rank that we have voted for + acRank := r.consensusState.LatestAcknowledgedRank + if stateRank == acRank { + return models.NewNoVoteErrorf( + "already voted or generated timeout in rank %d", + stateRank, + ) + } + if stateRank < acRank { + return fmt.Errorf( + "already acted during rank %d but got proposal for lower rank %d", + acRank, + stateRank, + ) + } + + return nil +} + +// IsSafeToTimeout checks if it's safe to timeout with proposed data, i.e. +// timing out won't break safety. newestQC is the valid QC with the greatest +// rank that we have observed. previousRankTimeoutCert is the TC for the +// previous rank (might be nil). +// +// When generating a timeout, the inputs are provided by node-internal +// components. Failure to comply with the protocol is a symptom of an internal +// bug. We don't expect any errors during normal operations. +func (r *SafetyRules[StateT, VoteT]) IsSafeToTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) error { + err := r.validateEvidenceForEnteringRank( + curRank, + newestQC, + previousRankTimeoutCert, + ) + if err != nil { + return fmt.Errorf("not safe to timeout: %w", err) + } + + if newestQC.GetRank() < r.consensusState.FinalizedRank { + return fmt.Errorf( + "have already seen QC for rank %d, but newest QC is reported to be for rank %d", + r.consensusState.FinalizedRank, + newestQC.GetRank(), + ) + } + if curRank+1 <= r.consensusState.LatestAcknowledgedRank { + return fmt.Errorf("cannot generate timeout for past rank %d", curRank) + } + // the logic for rejecting inputs with `curRank <= newestQC.Rank` is already + // contained in `validateEvidenceForEnteringRank(..)`, because it only passes + // if + // * either `curRank == newestQC.Rank + 1` (condition 2) + // * or `curRank > newestQC.Rank` (condition 4) + + return nil +} + +// validateEvidenceForEnteringRank performs the following check that is +// fundamental for consensus safety: Whenever a replica acts within a rank, it +// must prove that is has sufficient evidence to enter this rank +// Specifically: +// 1. The replica must always provide a QC and optionally a TC. +// 2. [Happy Path] If the previous round (i.e. `rank -1`) resulted in a QC, the +// replica is allowed to transition to `rank`. The QC from the previous +// round provides sufficient evidence. Furthermore, to prevent +// resource-exhaustion attacks, we require that no TC is included as part of +// the proof. +// 3. Following the Happy Path has priority over following the Recovery Path +// (specified below). +// 4. [Recovery Path] If the previous round (i.e. `rank -1`) did *not* result +// in a QC, a TC from the previous round is required to transition to +// `rank`. The following additional consistency requirements have to be +// satisfied: +// (a) newestQC.Rank + 1 < rank +// Otherwise, the replica has violated condition 3 (in case +// newestQC.Rank + 1 = rank); or the replica failed to apply condition 2 (in +// case newestQC.Rank + 1 > rank). +// (b) newestQC.Rank ≥ previousRankTimeoutCert.NewestQC.Rank +// Otherwise, the replica has violated condition 3. +// +// SafetyRules has the sole signing authority and enforces adherence to these +// conditions. In order to generate valid consensus signatures, the replica must +// provide the respective evidence (required QC + optional TC) to its internal +// SafetyRules component for each consensus action that the replica wants to +// take: +// - primary signing its own proposal +// - replica voting for a state +// - replica generating a timeout message +// +// During normal operations, no errors are expected: +// - As we are expecting the states to be pre-validated, any failure here is a +// symptom of an internal bug. +// - When generating a timeout, the inputs are provided by node-internal +// components. Failure to comply with the protocol is a symptom of an +// internal bug. +func (r *SafetyRules[StateT, VoteT]) validateEvidenceForEnteringRank( + rank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) error { + // Condition 1: + if newestQC == nil { + return fmt.Errorf("missing the mandatory QC") + } + + // Condition 2: + if newestQC.GetRank()+1 == rank { + if previousRankTimeoutCert != nil { + return fmt.Errorf("when QC is for prior round, no TC should be provided") + } + return nil + } + // Condition 3: if we reach the following lines, the happy path is not + // satisfied. + + // Condition 4: + if previousRankTimeoutCert == nil { + return fmt.Errorf( + "expecting TC because QC is not for prior rank; but didn't get any TC", + ) + } + if previousRankTimeoutCert.GetRank()+1 != rank { + return fmt.Errorf( + "neither QC (rank %d) nor TC (rank %d) allows to transition to rank %d", + newestQC.GetRank(), + previousRankTimeoutCert.GetRank(), + rank, + ) + } + if newestQC.GetRank() >= rank { + // Note: we need to enforce here that `newestQC.Rank + 1 < rank`, i.e. we + // error for `newestQC.Rank+1 >= rank` However, `newestQC.Rank+1 == rank` is + // impossible, because otherwise we would have walked into condition 2. + // Hence, it suffices to error if `newestQC.Rank+1 > rank`, which is + // identical to `newestQC.Rank >= rank` + return fmt.Errorf( + "still at rank %d, despite knowing a QC for rank %d", + rank, + newestQC.GetRank(), + ) + } + if newestQC.GetRank() < previousRankTimeoutCert.GetLatestQuorumCert().GetRank() { + return fmt.Errorf( + "failed to update newest QC (still at rank %d) despite a newer QC (rank %d) being included in TC", + newestQC.GetRank(), + previousRankTimeoutCert.GetLatestQuorumCert().GetRank(), + ) + } + + return nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/signature/packer.go b/consensus/signature/packer.go new file mode 100644 index 0000000..7e80b5d --- /dev/null +++ b/consensus/signature/packer.go @@ -0,0 +1,74 @@ +package signature + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// ConsensusSigDataPacker implements the consensus.Packer interface. +type ConsensusSigDataPacker struct { + committees consensus.Replicas +} + +var _ consensus.Packer = &ConsensusSigDataPacker{} + +// NewConsensusSigDataPacker creates a new ConsensusSigDataPacker instance +func NewConsensusSigDataPacker( + committees consensus.Replicas, +) *ConsensusSigDataPacker { + return &ConsensusSigDataPacker{ + committees: committees, + } +} + +// Pack serializes the state signature data into raw bytes, suitable to create a +// QC. To pack the state signature data, we first build a compact data type, and +// then encode it into bytes. Expected error returns during normal operations: +// - none; all errors are symptoms of inconsistent input data or corrupted +// internal state. +func (p *ConsensusSigDataPacker) Pack( + rank uint64, + sig *consensus.StateSignatureData, +) ([]byte, []byte, error) { + // retrieve all authorized consensus participants at the given state + fullMembers, err := p.committees.IdentitiesByRank(rank) + if err != nil { + return nil, nil, fmt.Errorf( + "could not find consensus committee for rank %d: %w", + rank, + err, + ) + } + + sigSet := map[models.Identity]struct{}{} + for _, s := range sig.Signers { + sigSet[s.Identity()] = struct{}{} + } + + signerIndices := make([]byte, len(fullMembers)+7/8) + for i, member := range fullMembers { + if _, ok := sigSet[member.Identity()]; ok { + signerIndices[i/8] |= 1 << i % 8 + } + } + + return signerIndices, sig.Signature, nil +} + +// Unpack de-serializes the provided signature data. +// rank is the rank of the state that the aggregated sig is signed for +// sig is the aggregated signature data +// It returns: +// - (sigData, nil) if successfully unpacked the signature data +// - (nil, models.InvalidFormatError) if failed to unpack the signature data +func (p *ConsensusSigDataPacker) Unpack( + signerIdentities []models.WeightedIdentity, + sigData []byte, +) (*consensus.StateSignatureData, error) { + return &consensus.StateSignatureData{ + Signers: signerIdentities, + Signature: sigData, + }, nil +} diff --git a/consensus/signature/state_signer_decoder.go b/consensus/signature/state_signer_decoder.go new file mode 100644 index 0000000..4e45d60 --- /dev/null +++ b/consensus/signature/state_signer_decoder.go @@ -0,0 +1,135 @@ +package signature + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateSignerDecoder is a wrapper around the `consensus.DynamicCommittee`, +// which implements the auxiliary logic for de-coding signer indices of a state +// (header) to full node IDs +type StateSignerDecoder[StateT models.Unique] struct { + consensus.DynamicCommittee +} + +func NewStateSignerDecoder[StateT models.Unique]( + committee consensus.DynamicCommittee, +) *StateSignerDecoder[StateT] { + return &StateSignerDecoder[StateT]{committee} +} + +var _ consensus.StateSignerDecoder[*nilUnique] = (*StateSignerDecoder[*nilUnique])(nil) + +// DecodeSignerIDs decodes the signer indices from the given state into +// full node IDs. Note: A state header contains a quorum certificate for its +// parent, which proves that the consensus committee has reached agreement on +// validity of parent state. Consequently, the returned IdentifierList contains +// the consensus participants that signed the parent state. Expected Error +// returns during normal operations: +// - signature.InvalidSignerIndicesError if signer indices included in the +// state do not encode a valid subset of the consensus committee +// - state.ErrUnknownSnapshotReference if the input state is not a known +// incorporated state. +func (b *StateSignerDecoder[StateT]) DecodeSignerIDs( + state *models.State[StateT], +) ( + []models.WeightedIdentity, + error, +) { + // root state does not have signer indices + if state.ParentQuorumCertificate == nil { + return []models.WeightedIdentity{}, nil + } + + // we will use IdentitiesByRank since it's a faster call and avoids DB lookup + members, err := b.IdentitiesByRank(state.ParentQuorumCertificate.GetRank()) + if err != nil { + if errors.Is(err, models.ErrRankUnknown) { + // possibly, we request epoch which is far behind in the past, in this + // case we won't have it in cache. try asking by parent ID + byStateMembers, err := b.IdentitiesByState( + state.ParentQuorumCertificate.GetSelector(), + ) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve identities for state %x with QC rank %d for parent %x: %w", + state.Identifier, + state.ParentQuorumCertificate.GetRank(), + state.ParentQuorumCertificate.GetSelector(), + err, + ) // state.ErrUnknownSnapshotReference or exception + } + members = byStateMembers + } else { + return nil, fmt.Errorf( + "unexpected error retrieving identities for state %v: %w", + state.Identifier, + err, + ) + } + } + + signerIDs := []models.WeightedIdentity{} + sigIndices := state.ParentQuorumCertificate.GetAggregatedSignature().GetBitmask() + for i, member := range members { + if sigIndices[i/8]>>i%8&1 == 1 { + signerIDs = append(signerIDs, member) + } + } + + return signerIDs, nil +} + +// NoopStateSignerDecoder does not decode any signer indices and consistently +// returns nil for the signing node IDs (auxiliary data) +type NoopStateSignerDecoder[StateT models.Unique] struct{} + +func NewNoopStateSignerDecoder[ + StateT models.Unique, +]() *NoopStateSignerDecoder[StateT] { + return &NoopStateSignerDecoder[StateT]{} +} + +func (b *NoopStateSignerDecoder[StateT]) DecodeSignerIDs( + _ *models.State[StateT], +) ([]models.WeightedIdentity, error) { + return nil, nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/signature/weighted_signature_aggregator.go b/consensus/signature/weighted_signature_aggregator.go new file mode 100644 index 0000000..1c3d54e --- /dev/null +++ b/consensus/signature/weighted_signature_aggregator.go @@ -0,0 +1,223 @@ +package signature + +import ( + "errors" + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// signerInfo holds information about a signer, its weight and index +type signerInfo struct { + weight uint64 + pk []byte + index int +} + +// WeightedSignatureAggregator implements consensus.WeightedSignatureAggregator. +// It is a wrapper around signature.SignatureAggregatorSameMessage, which +// implements a mapping from node IDs (as used by HotStuff) to index-based +// addressing of authorized signers (as used by SignatureAggregatorSameMessage). +// +// Similarly to module/signature.SignatureAggregatorSameMessage, this module +// assumes proofs of possession (PoP) of all identity public keys are valid. +type WeightedSignatureAggregator struct { + aggregator consensus.SignatureAggregator + ids []models.WeightedIdentity + idToInfo map[models.Identity]signerInfo + totalWeight uint64 + dsTag []byte + message []byte + lock sync.RWMutex + + // collectedSigs tracks the Identities of all nodes whose signatures have been + // collected so far. The reason for tracking the duplicate signers at this + // module level is that having no duplicates is a Hotstuff constraint, rather + // than a cryptographic aggregation constraint. + collectedSigs map[models.Identity][]byte +} + +var _ consensus.WeightedSignatureAggregator = (*WeightedSignatureAggregator)(nil) + +// NewWeightedSignatureAggregator returns a weighted aggregator initialized with +// a list of identities, their respective public keys, a message and a +// domain separation tag. The identities represent the list of all possible +// signers. This aggregator is only safe if PoPs of all identity keys are valid. +// This constructor does not verify the PoPs but assumes they have been +// validated outside this module. +// The constructor errors if: +// - the list of identities is empty +// - if the length of keys does not match the length of identities +// - if one of the keys is not a valid public key. +// +// A weighted aggregator is used for one aggregation only. A new instance should +// be used for each signature aggregation task in the protocol. +func NewWeightedSignatureAggregator( + ids []models.WeightedIdentity, + pks [][]byte, // list of corresponding public keys used for signature verifications + message []byte, // message to get an aggregated signature for + dsTag []byte, // domain separation tag used by the signature + aggregator consensus.SignatureAggregator, +) (*WeightedSignatureAggregator, error) { + if len(ids) != len(pks) { + return nil, fmt.Errorf("keys length %d and identities length %d do not match", len(pks), len(ids)) + } + + // build the internal map for a faster look-up + idToInfo := make(map[models.Identity]signerInfo) + for i, id := range ids { + idToInfo[id.Identity()] = signerInfo{ + weight: id.Weight(), + pk: pks[i], + index: i, + } + } + + return &WeightedSignatureAggregator{ + dsTag: dsTag, + ids: ids, + idToInfo: idToInfo, + aggregator: aggregator, + message: message, + collectedSigs: make(map[models.Identity][]byte), + }, nil +} + +// Verify verifies the signature under the stored public keys and message. +// Expected errors during normal operations: +// - models.InvalidSignerError if signerID is invalid (not a consensus +// participant) +// - models.ErrInvalidSignature if signerID is valid but signature is +// cryptographically invalid +// +// The function is thread-safe. +func (w *WeightedSignatureAggregator) Verify( + signerID models.Identity, + sig []byte, +) error { + info, ok := w.idToInfo[signerID] + if !ok { + return models.NewInvalidSignerErrorf( + "%v is not an authorized signer", + signerID, + ) + } + + ok = w.aggregator.VerifySignatureRaw(info.pk, sig, w.message, w.dsTag) + if !ok { + return fmt.Errorf( + "invalid signature from %s: %w", + signerID, + models.ErrInvalidSignature, + ) + } + return nil +} + +// TrustedAdd adds a signature to the internal set of signatures and adds the +// signer's weight to the total collected weight, iff the signature is _not_ a +// duplicate. +// +// The total weight of all collected signatures (excluding duplicates) is +// returned regardless of any returned error. +// The function errors with: +// - models.InvalidSignerError if signerID is invalid (not a consensus +// participant) +// - models.DuplicatedSignerError if the signer has been already added +// +// The function is thread-safe. +func (w *WeightedSignatureAggregator) TrustedAdd( + signerID models.Identity, + sig []byte, +) (uint64, error) { + info, found := w.idToInfo[signerID] + if !found { + return w.TotalWeight(), models.NewInvalidSignerErrorf( + "%v is not an authorized signer", + signerID, + ) + } + + // atomically update the signatures pool and the total weight + w.lock.Lock() + defer w.lock.Unlock() + + // check for repeated occurrence of signerID + if _, duplicate := w.collectedSigs[signerID]; duplicate { + return w.totalWeight, models.NewDuplicatedSignerErrorf( + "signature from %v was already added", + signerID, + ) + } + + w.collectedSigs[signerID] = sig + w.totalWeight += info.weight + + return w.totalWeight, nil +} + +// TotalWeight returns the total weight presented by the collected signatures. +// The function is thread-safe +func (w *WeightedSignatureAggregator) TotalWeight() uint64 { + w.lock.RLock() + defer w.lock.RUnlock() + return w.totalWeight +} + +// Aggregate aggregates the signatures and returns the aggregated signature. +// The function performs a final verification and errors if the aggregated +// signature is invalid. This is required for the function safety since +// `TrustedAdd` allows adding invalid signatures. The function errors with: +// - models.InsufficientSignaturesError if no signatures have been added yet +// - models.InvalidSignatureIncludedError if: +// - some signature(s), included via TrustedAdd, fail to deserialize +// (regardless of the aggregated public key) +// -- or all signatures deserialize correctly but some signature(s), +// included via TrustedAdd, are invalid (while aggregated public key is +// valid) +// -- models.InvalidAggregatedKeyError if all signatures deserialize +// correctly but the signer's proving public keys sum up to an invalid +// key (BLS identity public key). Any aggregated signature would fail the +// cryptographic verification under the identity public key and therefore +// such signature is considered invalid. Such scenario can only happen if +// proving public keys of signers were forged to add up to the identity +// public key. Under the assumption that all proving key PoPs are valid, +// this error case can only happen if all signers are malicious and +// colluding. If there is at least one honest signer, there is a +// negligible probability that the aggregated key is identity. +// +// The function is thread-safe. +func (w *WeightedSignatureAggregator) Aggregate() ( + []models.WeightedIdentity, + models.AggregatedSignature, + error, +) { + w.lock.Lock() + defer w.lock.Unlock() + + pks := [][]byte{} + signerIDs := []models.WeightedIdentity{} + sigs := [][]byte{} + for id, sig := range w.collectedSigs { + signerIDs = append(signerIDs, w.ids[w.idToInfo[id].index]) + pks = append(pks, w.idToInfo[id].pk) + sigs = append(sigs, sig) + } + if len(sigs) == 0 { + return nil, nil, models.NewInsufficientSignaturesError( + errors.New("no signatures"), + ) + } + + aggSignature, err := w.aggregator.Aggregate(pks, sigs) + if err != nil { + return nil, nil, fmt.Errorf( + "unexpected error during signature aggregation: %w", + err, + ) + } + + return signerIDs, aggSignature, nil +} diff --git a/consensus/state_machine_test.go b/consensus/state_machine_test.go index a2b1b75..2f90ff2 100644 --- a/consensus/state_machine_test.go +++ b/consensus/state_machine_test.go @@ -1,1055 +1,1055 @@ package consensus -import ( - "context" - "fmt" - "slices" - "sync" - "testing" - "time" - - "github.com/pkg/errors" -) - -// Test types for the generic state machine -type TestState struct { - Round uint64 - Hash string - Timestamp time.Time - ProposalID string -} - -func (t TestState) Identity() string { - return t.Hash -} - -func (t TestState) Rank() uint64 { - return t.Round -} - -func (t TestState) Clone() Unique { - return TestState{ - Round: t.Round, - Hash: t.Hash, - Timestamp: t.Timestamp, - ProposalID: t.ProposalID, - } -} - -type TestVote struct { - Round uint64 - VoterID string - ProposalID string - Signature string -} - -func (t TestVote) Identity() string { - return t.VoterID -} - -func (t TestVote) Rank() uint64 { - return t.Round -} - -func (t TestVote) Clone() Unique { - return TestVote{ - Round: t.Round, - VoterID: t.VoterID, - ProposalID: t.ProposalID, - Signature: t.Signature, - } -} - -type TestPeerID string - -func (t TestPeerID) Identity() string { - return string(t) -} - -func (t TestPeerID) Clone() Unique { - return t -} - -func (t TestPeerID) Rank() uint64 { - return 0 -} - -type TestCollected struct { - Round uint64 - Data []byte - Timestamp time.Time -} - -func (t TestCollected) Identity() string { - return string(t.Data) -} - -func (t TestCollected) Rank() uint64 { - return t.Round -} - -func (t TestCollected) Clone() Unique { - return TestCollected{ - Round: t.Round, - Data: slices.Clone(t.Data), - Timestamp: t.Timestamp, - } -} - -// Mock implementations -type mockSyncProvider struct { - syncDelay time.Duration - newState *TestState -} - -func (m *mockSyncProvider) Synchronize( - existing *TestState, - ctx context.Context, -) (<-chan *TestState, <-chan error) { - stateCh := make(chan *TestState, 1) - errCh := make(chan error, 1) - - go func() { - select { - case <-time.After(m.syncDelay): - if m.newState != nil { - stateCh <- m.newState - } else if existing != nil { - // Just return existing state - stateCh <- existing - } else { - // Create initial state - stateCh <- &TestState{ - Round: 0, - Hash: "genesis", - Timestamp: time.Now(), - } - } - close(stateCh) - close(errCh) - case <-ctx.Done(): - close(stateCh) - close(errCh) - } - }() - - return stateCh, errCh -} - -type mockVotingProvider struct { - mu sync.Mutex - quorumSize int - sentProposals []*TestState - sentVotes []*TestVote - confirmations []*TestState -} - -func (m *mockVotingProvider) SendProposal(proposal *TestState, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.sentProposals = append(m.sentProposals, proposal) - return nil -} - -func (m *mockVotingProvider) DecideAndSendVote( - proposals map[Identity]*TestState, - ctx context.Context, -) (TestPeerID, *TestVote, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Pick first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - vote := &TestVote{ - VoterID: "leader1", - ProposalID: proposal.ProposalID, - Signature: "test-sig", - } - m.sentVotes = append(m.sentVotes, vote) - return TestPeerID(peerID), vote, nil - } - - return "", nil, errors.New("no proposal to vote for") -} - -func (m *mockVotingProvider) SendVote(vote *TestVote, ctx context.Context) (TestPeerID, error) { - return "", nil -} - -func (m *mockVotingProvider) IsQuorum(proposalVotes map[Identity]*TestVote, ctx context.Context) (bool, error) { - totalVotes := 0 - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - } - totalVotes += 1 - - if count >= m.quorumSize { - return true, nil - } - } - if totalVotes >= m.quorumSize { - return false, errors.New("split quorum") - } - return false, nil -} - -func (m *mockVotingProvider) FinalizeVotes( - proposals map[Identity]*TestState, - proposalVotes map[Identity]*TestVote, - ctx context.Context, -) (*TestState, TestPeerID, error) { - // Pick the proposal with the most votes - winnerCount := 0 - var winnerProposal *TestState = nil - var winnerProposer TestPeerID - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - } - } - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - if _, ok := voteCount[proposal.ProposalID]; !ok { - continue - } - if voteCount[proposal.ProposalID] > winnerCount { - winnerCount = voteCount[proposal.ProposalID] - winnerProposal = proposal - winnerProposer = TestPeerID(peerID) - } - } - - if winnerProposal != nil { - // Create new state with incremented round - newState := &TestState{ - Round: winnerProposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", winnerProposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, winnerProposer, nil - } - - // Default to first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - newState := &TestState{ - Round: proposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", proposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, TestPeerID(peerID), nil - } - - return nil, "", nil -} - -func (m *mockVotingProvider) SendConfirmation(finalized *TestState, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.confirmations = append(m.confirmations, finalized) - return nil -} - -type mockLeaderProvider struct { - isLeader bool - leaders []TestPeerID - proveDelay time.Duration - shouldFail bool -} - -func (m *mockLeaderProvider) GetNextLeaders(prior *TestState, ctx context.Context) ([]TestPeerID, error) { - if len(m.leaders) > 0 { - return m.leaders, nil - } - return []TestPeerID{"leader1", "leader2", "leader3"}, nil -} - -func (m *mockLeaderProvider) ProveNextState( - prior *TestState, - collected TestCollected, - ctx context.Context, -) (*TestState, error) { - if m.shouldFail || !m.isLeader { - return nil, context.Canceled - } - - select { - case <-time.After(m.proveDelay): - round := uint64(0) - if prior != nil { - round = prior.Round - } - return &TestState{ - Round: round + 1, - Hash: "proved-hash", - Timestamp: time.Now(), - ProposalID: "proposal-" + fmt.Sprintf("%d", round+1), - }, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -type mockLivenessProvider struct { - collectDelay time.Duration - sentLiveness int - mu sync.Mutex -} - -func (m *mockLivenessProvider) Collect(ctx context.Context) (TestCollected, error) { - select { - case <-time.After(m.collectDelay): - return TestCollected{ - Round: 1, - Data: []byte("collected-data"), - Timestamp: time.Now(), - }, nil - case <-ctx.Done(): - return TestCollected{}, ctx.Err() - } -} - -func (m *mockLivenessProvider) SendLiveness(prior *TestState, collected TestCollected, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.sentLiveness++ - return nil -} - -// MockTransitionListener for tracking state transitions -type MockTransitionListener struct { - mu sync.Mutex - transitions []TransitionRecord -} - -type TransitionRecord struct { - From State - To State - Event Event - Time time.Time -} - -func (m *MockTransitionListener) OnTransition(from State, to State, event Event) { - m.mu.Lock() - defer m.mu.Unlock() - m.transitions = append(m.transitions, TransitionRecord{ - From: from, - To: to, - Event: event, - Time: time.Now(), - }) -} - -func (m *MockTransitionListener) GetTransitions() []TransitionRecord { - m.mu.Lock() - defer m.mu.Unlock() - result := make([]TransitionRecord, len(m.transitions)) - copy(result, m.transitions) - return result -} - -// Helper to create test state machine -func createTestStateMachine( - id TestPeerID, - isLeader bool, -) *StateMachine[TestState, TestVote, TestPeerID, TestCollected] { - leaders := []TestPeerID{"leader1", "leader2", "leader3"} - if isLeader { - leaders[0] = id - } - - // For leader-only tests, set minimumProvers to 1 - minimumProvers := func() uint64 { return uint64(2) } - if isLeader { - minimumProvers = func() uint64 { return uint64(1) } - } - - return NewStateMachine( - id, - &TestState{Round: 0, Hash: "genesis", Timestamp: time.Now()}, - true, // shouldEmitReceiveEventsOnSends - minimumProvers, - &mockSyncProvider{syncDelay: 10 * time.Millisecond}, - &mockVotingProvider{quorumSize: int(minimumProvers())}, - &mockLeaderProvider{ - isLeader: isLeader, - leaders: leaders, - proveDelay: 50 * time.Millisecond, - }, - &mockLivenessProvider{collectDelay: 10 * time.Millisecond}, - nil, - ) -} - -// Helper to wait for a specific state in transition history -func waitForTransition(listener *MockTransitionListener, targetState State, timeout time.Duration) bool { - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - transitions := listener.GetTransitions() - for _, tr := range transitions { - if tr.To == targetState { - return true - } - } - time.Sleep(10 * time.Millisecond) - } - return false -} - -// Helper to check if a state was reached in transition history -func hasReachedState(listener *MockTransitionListener, targetState State) bool { - transitions := listener.GetTransitions() - for _, tr := range transitions { - if tr.To == targetState { - return true - } - } - return false -} - -func TestStateMachineBasicTransitions(t *testing.T) { - sm := createTestStateMachine("test-node", true) - defer sm.Close() - - // Initial state should be stopped - if sm.GetState() != StateStopped { - t.Errorf("Expected initial state to be %s, got %s", StateStopped, sm.GetState()) - } - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - // Start the state machine - err := sm.Start() - if err != nil { - t.Fatalf("Failed to start state machine: %v", err) - } - - time.Sleep(10 * time.Millisecond) - - // Should transition to starting immediately - if sm.GetState() != StateStarting { - t.Errorf("Expected state to be %s after start, got %s", StateStarting, sm.GetState()) - } - - // Wait for automatic transitions - if !waitForTransition(listener, StateLoading, 2*time.Second) { - t.Fatalf("Failed to reach loading state") - } - - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Verify the expected transition sequence - transitions := listener.GetTransitions() - expectedSequence := []State{StateStarting, StateLoading, StateCollecting} - - for i, expected := range expectedSequence { - if i >= len(transitions) { - t.Errorf("Missing transition to %s", expected) - continue - } - if transitions[i].To != expected { - t.Errorf("Expected transition %d to be to %s, got %s", i, expected, transitions[i].To) - } - } -} - -func TestStateMachineLeaderFlow(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - // Start the machine - err := sm.Start() - if err != nil { - t.Fatalf("Failed to start: %v", err) - } - - // Wait for the leader to progress through states - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Leader should reach proving state - if !waitForTransition(listener, StateProving, 5*time.Second) { - // Debug output if test fails - transitions := listener.GetTransitions() - t.Logf("Current state: %s", sm.GetState()) - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Fatalf("Leader should have entered proving state") - } - - // Verify expected states were reached - if !hasReachedState(listener, StateCollecting) { - t.Error("Leader should have gone through collecting state") - } - if !hasReachedState(listener, StateLivenessCheck) { - t.Error("Leader should have gone through liveness check state") - } - if !hasReachedState(listener, StateProving) { - t.Error("Leader should have entered proving state") - } -} - -func TestStateMachineExternalEvents(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for collecting state - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Send liveness check - sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) - - // Receive a proposal while collecting - err := sm.ReceiveProposal("external-leader", &TestState{ - Round: 1, - Hash: "external-hash", - Timestamp: time.Now(), - ProposalID: "external-proposal", - }) - if err != nil { - t.Fatalf("Failed to receive proposal: %v", err) - } - - // Should transition to voting - if !waitForTransition(listener, StateVoting, 4*time.Second) { - t.Errorf("Expected to transition to voting after proposal") - } - - // Verify the transition happened - if !hasReachedState(listener, StateVoting) { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Error("Should have transitioned to voting state") - } -} - -func TestStateMachineVoting(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for collecting state - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Send liveness check - sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) - - // Send proposal to trigger voting - sm.ReceiveProposal("leader2", &TestState{ - Round: 1, - Hash: "test-hash", - Timestamp: time.Now(), - ProposalID: "test-proposal", - }) - - // Wait for voting state - if !waitForTransition(listener, StateVoting, 2*time.Second) { - t.Fatalf("Failed to reach voting state") - } - - // Add another vote to reach quorum - err := sm.ReceiveVote("leader1", "leader2", &TestVote{ - Round: 1, - VoterID: "leader2", - ProposalID: "test-proposal", - Signature: "sig2", - }) - if err != nil { - t.Fatalf("Failed to receive vote: %v", err) - } - - // Should eventually progress past voting (to finalizing, verifying, or back to collecting) - time.Sleep(2 * time.Second) - - // Check if we progressed past voting - progressedPastVoting := hasReachedState(listener, StateFinalizing) || - hasReachedState(listener, StateVerifying) || - (hasReachedState(listener, StateCollecting) && len(listener.GetTransitions()) > 5) - - if !progressedPastVoting { - // If still stuck, try manual trigger - sm.SendEvent(EventQuorumReached) - time.Sleep(500 * time.Millisecond) - - progressedPastVoting = hasReachedState(listener, StateFinalizing) || - hasReachedState(listener, StateVerifying) || - (hasReachedState(listener, StateCollecting) && len(listener.GetTransitions()) > 5) - } - - if !progressedPastVoting { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Errorf("Expected to progress past voting with quorum") - } -} - -func TestStateMachineStop(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for any state beyond starting - if !waitForTransition(listener, StateLoading, 2*time.Second) { - t.Fatalf("State machine did not progress from starting") - } - - // Stop from any state - err := sm.Stop() - if err != nil { - t.Fatalf("Failed to stop: %v", err) - } - - // Should transition to stopping - if !waitForTransition(listener, StateStopping, 1*time.Second) { - t.Errorf("Expected to transition to stopping state") - } - - // Should eventually reach stopped - if !waitForTransition(listener, StateStopped, 3*time.Second) { - // Try manual cleanup complete - sm.SendEvent(EventCleanupComplete) - time.Sleep(100 * time.Millisecond) - } - - // Verify we reached stopped state - if !hasReachedState(listener, StateStopped) { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Errorf("Expected to reach stopped state") - } -} - -func TestStateMachineLiveness(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Wait for collecting state - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Wait for liveness check state - if !waitForTransition(listener, StateLivenessCheck, 3*time.Second) { - transitions := listener.GetTransitions() - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - t.Fatalf("Failed to reach liveness check state") - } - - // Receive liveness checks - sm.ReceiveLivenessCheck("peer1", TestCollected{ - Data: []byte("peer1-data"), - Timestamp: time.Now(), - }) - - sm.ReceiveLivenessCheck("peer2", TestCollected{ - Data: []byte("peer2-data"), - Timestamp: time.Now(), - }) - - // Give it a moment to process - time.Sleep(100 * time.Millisecond) - - // Check that liveness data was stored - sm.mu.RLock() - livenessCount := len(sm.liveness) - sm.mu.RUnlock() - - // Should have at least 2 entries (or 3 if self-emit is counted) - if livenessCount < 2 { - t.Errorf("Expected at least 2 liveness entries, got %d", livenessCount) - } -} - -func TestStateMachineMetrics(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - // Initial metrics - if sm.GetTransitionCount() != 0 { - t.Error("Expected initial transition count to be 0") - } - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - // Make transitions - sm.Start() - - // Wait for a few transitions - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - if sm.GetTransitionCount() == 0 { - t.Error("Expected transition count to be greater than 0") - } - - // Check state time - stateTime := sm.GetStateTime() - if stateTime < 0 { - t.Errorf("Invalid state time: %v", stateTime) - } -} - -func TestStateMachineConfirmations(t *testing.T) { - sm := createTestStateMachine("leader1", true) - sm.id = "leader1" - defer sm.Close() - - listener := &MockTransitionListener{} - sm.AddListener(listener) - - sm.Start() - - // Progress to voting state via proposal - if !waitForTransition(listener, StateCollecting, 3*time.Second) { - t.Fatalf("Failed to reach collecting state") - } - - // Send liveness check - sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) - - // Send proposal to get to voting - sm.ReceiveProposal("leader2", &TestState{ - Round: 1, - Hash: "test-hash", - Timestamp: time.Now(), - ProposalID: "test-proposal", - }) - - // Wait for voting - if !waitForTransition(listener, StateVoting, 2*time.Second) { - t.Fatalf("Failed to reach voting state") - } - - // Wait a bit for auto-progression or trigger manually - time.Sleep(1 * time.Second) - - // Try to progress to finalizing - sm.SendEvent(EventVotingTimeout) - time.Sleep(500 * time.Millisecond) - - // Check if we reached a state that accepts confirmations - currentState := sm.GetState() - canAcceptConfirmation := currentState == StateFinalizing || currentState == StateVerifying - - if !canAcceptConfirmation { - // Check transition history - if hasReachedState(listener, StateFinalizing) || hasReachedState(listener, StateVerifying) { - // We passed through the state already, that's ok - canAcceptConfirmation = true - } else { - transitions := listener.GetTransitions() - t.Logf("Current state: %s", currentState) - t.Logf("Total transitions: %d", len(transitions)) - for _, tr := range transitions { - t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) - } - // Don't fail - just skip the confirmation test - t.Skip("Could not reach a state that accepts confirmations") - } - } - - // Send confirmation (should only be accepted in finalizing or verifying) - sm.ReceiveConfirmation("leader2", &TestState{ - Round: 1, - Hash: "confirmed-hash", - Timestamp: time.Now(), - ProposalID: "confirmed", - }) - - // Check that confirmation was stored - sm.mu.RLock() - confirmCount := len(sm.confirmations) - sm.mu.RUnlock() - - if confirmCount != 1 { - t.Errorf("Expected 1 confirmation, got %d", confirmCount) - } -} - -func TestStateMachineConcurrency(t *testing.T) { - sm := createTestStateMachine("leader1", true) - defer sm.Close() - - sm.Start() - time.Sleep(500 * time.Millisecond) - - // Concurrent operations - var wg sync.WaitGroup - errChan := make(chan error, 5) - - // Send multiple events concurrently - for i := 0; i < 5; i++ { - wg.Add(1) - go func() { - defer wg.Done() - sm.SendEvent(EventSyncComplete) - }() - } - - // Receive data concurrently - for i := 0; i < 5; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - peerID := TestPeerID(fmt.Sprintf("peer%d", id)) - if err := sm.ReceiveLivenessCheck(peerID, TestCollected{ - Data: []byte("data"), - }); err != nil { - errChan <- err - } - }(i) - } - - wg.Wait() - close(errChan) - - // Some errors are expected due to invalid state transitions - errorCount := 0 - for err := range errChan { - if err != nil { - errorCount++ - } - } - - // As long as we didn't panic, concurrency is handled - t.Logf("Concurrent operations completed with %d errors (expected)", errorCount) -} - -type mockPanickingVotingProvider struct { - mu sync.Mutex - quorumSize int - sentProposals []*TestState - sentVotes []*TestVote - confirmations []*TestState -} - -func (m *mockPanickingVotingProvider) SendProposal(proposal *TestState, ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - m.sentProposals = append(m.sentProposals, proposal) - return nil -} - -func (m *mockPanickingVotingProvider) DecideAndSendVote( - proposals map[Identity]*TestState, - ctx context.Context, -) (TestPeerID, *TestVote, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Pick first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - vote := &TestVote{ - VoterID: "leader1", - ProposalID: proposal.ProposalID, - Signature: "test-sig", - } - m.sentVotes = append(m.sentVotes, vote) - return TestPeerID(peerID), vote, nil - } - - return "", nil, errors.New("no proposal to vote for") -} - -func (m *mockPanickingVotingProvider) IsQuorum(proposalVotes map[Identity]*TestVote, ctx context.Context) (bool, error) { - totalVotes := 0 - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - count = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - count = count + 1 - } - totalVotes += 1 - - if count >= m.quorumSize { - return true, nil - } - } - if totalVotes >= m.quorumSize { - return false, errors.New("split quorum") - } - return false, nil -} - -func (m *mockPanickingVotingProvider) FinalizeVotes( - proposals map[Identity]*TestState, - proposalVotes map[Identity]*TestVote, - ctx context.Context, -) (*TestState, TestPeerID, error) { - // Pick the proposal with the most votes - winnerCount := 0 - var winnerProposal *TestState = nil - var winnerProposer TestPeerID - voteCount := map[string]int{} - for _, votes := range proposalVotes { - count, ok := voteCount[votes.ProposalID] - if !ok { - voteCount[votes.ProposalID] = 1 - count = 1 - } else { - voteCount[votes.ProposalID] = count + 1 - count += 1 - } - } - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - if _, ok := voteCount[proposal.ProposalID]; !ok { - continue - } - if voteCount[proposal.ProposalID] > winnerCount { - winnerCount = voteCount[proposal.ProposalID] - winnerProposal = proposal - winnerProposer = TestPeerID(peerID) - } - } - - if winnerProposal != nil { - // Create new state with incremented round - newState := &TestState{ - Round: winnerProposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", winnerProposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, winnerProposer, nil - } - - // Default to first proposal - for peerID, proposal := range proposals { - if proposal == nil { - continue - } - newState := &TestState{ - Round: proposal.Round + 1, - Hash: "hash-" + fmt.Sprintf("%d", proposal.Round+1), - Timestamp: time.Now(), - ProposalID: "finalized", - } - return newState, TestPeerID(peerID), nil - } - - return nil, "", nil -} - -func (m *mockPanickingVotingProvider) SendVote(vote *TestVote, ctx context.Context) (TestPeerID, error) { - return "", nil -} - -func (m *mockPanickingVotingProvider) SendConfirmation(finalized *TestState, ctx context.Context) error { - panic("PANIC HERE") -} - -type printtracer struct{} - -// Error implements TraceLogger. -func (p *printtracer) Error(message string, err error) { - fmt.Println("[error]", message, err) -} - -// Trace implements TraceLogger. -func (p *printtracer) Trace(message string) { - fmt.Println("[trace]", message) -} - -func TestStateMachinePanicRecovery(t *testing.T) { - minimumProvers := func() uint64 { return uint64(1) } - - sm := NewStateMachine( - "leader1", - &TestState{Round: 0, Hash: "genesis", Timestamp: time.Now()}, - true, // shouldEmitReceiveEventsOnSends - minimumProvers, - &mockSyncProvider{syncDelay: 10 * time.Millisecond}, - &mockPanickingVotingProvider{quorumSize: 1}, - &mockLeaderProvider{ - isLeader: true, - leaders: []TestPeerID{"leader1"}, - proveDelay: 50 * time.Millisecond, - }, - &mockLivenessProvider{collectDelay: 10 * time.Millisecond}, - &printtracer{}, - ) - defer sm.Close() - - sm.Start() - time.Sleep(10 * time.Second) - sm.mu.Lock() - if sm.machineState != StateStopped { - sm.mu.Unlock() - t.FailNow() - } - sm.mu.Unlock() - -} +// import ( +// "context" +// "fmt" +// "slices" +// "sync" +// "testing" +// "time" + +// "github.com/pkg/errors" +// ) + +// // Test types for the generic state machine +// type TestState struct { +// Round uint64 +// Hash string +// Timestamp time.Time +// ProposalID string +// } + +// func (t TestState) Identity() string { +// return t.Hash +// } + +// func (t TestState) GetRank() uint64 { +// return t.Round +// } + +// func (t TestState) Clone() Unique { +// return TestState{ +// Round: t.Round, +// Hash: t.Hash, +// Timestamp: t.Timestamp, +// ProposalID: t.ProposalID, +// } +// } + +// type TestVote struct { +// Round uint64 +// VoterID string +// ProposalID string +// Signature string +// } + +// func (t TestVote) Identity() string { +// return t.VoterID +// } + +// func (t TestVote) GetRank() uint64 { +// return t.Round +// } + +// func (t TestVote) Clone() Unique { +// return TestVote{ +// Round: t.Round, +// VoterID: t.VoterID, +// ProposalID: t.ProposalID, +// Signature: t.Signature, +// } +// } + +// type TestPeerID string + +// func (t TestPeerID) Identity() string { +// return string(t) +// } + +// func (t TestPeerID) Clone() Unique { +// return t +// } + +// func (t TestPeerID) GetRank() uint64 { +// return 0 +// } + +// type TestCollected struct { +// Round uint64 +// Data []byte +// Timestamp time.Time +// } + +// func (t TestCollected) Identity() string { +// return string(t.Data) +// } + +// func (t TestCollected) GetRank() uint64 { +// return t.Round +// } + +// func (t TestCollected) Clone() Unique { +// return TestCollected{ +// Round: t.Round, +// Data: slices.Clone(t.Data), +// Timestamp: t.Timestamp, +// } +// } + +// // Mock implementations +// type mockSyncProvider struct { +// syncDelay time.Duration +// newState *TestState +// } + +// func (m *mockSyncProvider) Synchronize( +// ctx context.Context, +// existing *TestState, +// ) (<-chan *TestState, <-chan error) { +// stateCh := make(chan *TestState, 1) +// errCh := make(chan error, 1) + +// go func() { +// select { +// case <-time.After(m.syncDelay): +// if m.newState != nil { +// stateCh <- m.newState +// } else if existing != nil { +// // Just return existing state +// stateCh <- existing +// } else { +// // Create initial state +// stateCh <- &TestState{ +// Round: 0, +// Hash: "genesis", +// Timestamp: time.Now(), +// } +// } +// close(stateCh) +// close(errCh) +// case <-ctx.Done(): +// close(stateCh) +// close(errCh) +// } +// }() + +// return stateCh, errCh +// } + +// type mockVotingProvider struct { +// mu sync.Mutex +// quorumSize int +// sentProposals []*TestState +// sentVotes []*TestVote +// confirmations []*TestState +// } + +// func (m *mockVotingProvider) SendProposal(ctx context.Context, proposal *TestState) error { +// m.mu.Lock() +// defer m.mu.Unlock() +// m.sentProposals = append(m.sentProposals, proposal) +// return nil +// } + +// func (m *mockVotingProvider) DecideAndSendVote( +// ctx context.Context, +// proposals map[Identity]*TestState, +// ) (TestPeerID, *TestVote, error) { +// m.mu.Lock() +// defer m.mu.Unlock() + +// // Pick first proposal +// for peerID, proposal := range proposals { +// if proposal == nil { +// continue +// } +// vote := &TestVote{ +// VoterID: "leader1", +// ProposalID: proposal.ProposalID, +// Signature: "test-sig", +// } +// m.sentVotes = append(m.sentVotes, vote) +// return TestPeerID(peerID), vote, nil +// } + +// return "", nil, errors.New("no proposal to vote for") +// } + +// func (m *mockVotingProvider) SendVote(ctx context.Context, vote *TestVote) (TestPeerID, error) { +// return "", nil +// } + +// func (m *mockVotingProvider) IsQuorum(ctx context.Context, proposalVotes map[Identity]*TestVote) (bool, error) { +// totalVotes := 0 +// voteCount := map[string]int{} +// for _, votes := range proposalVotes { +// count, ok := voteCount[votes.ProposalID] +// if !ok { +// voteCount[votes.ProposalID] = 1 +// } else { +// voteCount[votes.ProposalID] = count + 1 +// } +// totalVotes += 1 + +// if count >= m.quorumSize { +// return true, nil +// } +// } +// if totalVotes >= m.quorumSize { +// return false, errors.New("split quorum") +// } +// return false, nil +// } + +// func (m *mockVotingProvider) FinalizeVotes( +// ctx context.Context, +// proposals map[Identity]*TestState, +// proposalVotes map[Identity]*TestVote, +// ) (*TestState, TestPeerID, error) { +// // Pick the proposal with the most votes +// winnerCount := 0 +// var winnerProposal *TestState = nil +// var winnerProposer TestPeerID +// voteCount := map[string]int{} +// for _, votes := range proposalVotes { +// count, ok := voteCount[votes.ProposalID] +// if !ok { +// voteCount[votes.ProposalID] = 1 +// } else { +// voteCount[votes.ProposalID] = count + 1 +// } +// } +// for peerID, proposal := range proposals { +// if proposal == nil { +// continue +// } +// if _, ok := voteCount[proposal.ProposalID]; !ok { +// continue +// } +// if voteCount[proposal.ProposalID] > winnerCount { +// winnerCount = voteCount[proposal.ProposalID] +// winnerProposal = proposal +// winnerProposer = TestPeerID(peerID) +// } +// } + +// if winnerProposal != nil { +// // Create new state with incremented round +// newState := &TestState{ +// Round: winnerProposal.Round + 1, +// Hash: "hash-" + fmt.Sprintf("%d", winnerProposal.Round+1), +// Timestamp: time.Now(), +// ProposalID: "finalized", +// } +// return newState, winnerProposer, nil +// } + +// // Default to first proposal +// for peerID, proposal := range proposals { +// if proposal == nil { +// continue +// } +// newState := &TestState{ +// Round: proposal.Round + 1, +// Hash: "hash-" + fmt.Sprintf("%d", proposal.Round+1), +// Timestamp: time.Now(), +// ProposalID: "finalized", +// } +// return newState, TestPeerID(peerID), nil +// } + +// return nil, "", nil +// } + +// func (m *mockVotingProvider) SendConfirmation(ctx context.Context, finalized *TestState) error { +// m.mu.Lock() +// defer m.mu.Unlock() +// m.confirmations = append(m.confirmations, finalized) +// return nil +// } + +// type mockLeaderProvider struct { +// isLeader bool +// leaders []TestPeerID +// proveDelay time.Duration +// shouldFail bool +// } + +// func (m *mockLeaderProvider) GetNextLeaders(ctx context.Context, prior *TestState) ([]TestPeerID, error) { +// if len(m.leaders) > 0 { +// return m.leaders, nil +// } +// return []TestPeerID{"leader1", "leader2", "leader3"}, nil +// } + +// func (m *mockLeaderProvider) ProveNextState( +// ctx context.Context, +// prior *TestState, +// collected TestCollected, +// ) (*TestState, error) { +// if m.shouldFail || !m.isLeader { +// return nil, context.Canceled +// } + +// select { +// case <-time.After(m.proveDelay): +// round := uint64(0) +// if prior != nil { +// round = prior.Round +// } +// return &TestState{ +// Round: round + 1, +// Hash: "proved-hash", +// Timestamp: time.Now(), +// ProposalID: "proposal-" + fmt.Sprintf("%d", round+1), +// }, nil +// case <-ctx.Done(): +// return nil, ctx.Err() +// } +// } + +// type mockLivenessProvider struct { +// collectDelay time.Duration +// sentLiveness int +// mu sync.Mutex +// } + +// func (m *mockLivenessProvider) Collect(ctx context.Context) (TestCollected, error) { +// select { +// case <-time.After(m.collectDelay): +// return TestCollected{ +// Round: 1, +// Data: []byte("collected-data"), +// Timestamp: time.Now(), +// }, nil +// case <-ctx.Done(): +// return TestCollected{}, ctx.Err() +// } +// } + +// func (m *mockLivenessProvider) SendLiveness(ctx context.Context, prior *TestState, collected TestCollected) error { +// m.mu.Lock() +// defer m.mu.Unlock() +// m.sentLiveness++ +// return nil +// } + +// // MockTransitionListener for tracking state transitions +// type MockTransitionListener struct { +// mu sync.Mutex +// transitions []TransitionRecord +// } + +// type TransitionRecord struct { +// From State +// To State +// Event Event +// Time time.Time +// } + +// func (m *MockTransitionListener) OnTransition(from State, to State, event Event) { +// m.mu.Lock() +// defer m.mu.Unlock() +// m.transitions = append(m.transitions, TransitionRecord{ +// From: from, +// To: to, +// Event: event, +// Time: time.Now(), +// }) +// } + +// func (m *MockTransitionListener) GetTransitions() []TransitionRecord { +// m.mu.Lock() +// defer m.mu.Unlock() +// result := make([]TransitionRecord, len(m.transitions)) +// copy(result, m.transitions) +// return result +// } + +// // Helper to create test state machine +// func createTestStateMachine( +// id TestPeerID, +// isLeader bool, +// ) *StateMachine[TestState, TestVote, TestPeerID, TestCollected] { +// leaders := []TestPeerID{"leader1", "leader2", "leader3"} +// if isLeader { +// leaders[0] = id +// } + +// // For leader-only tests, set minimumProvers to 1 +// minimumProvers := func() uint64 { return uint64(2) } +// if isLeader { +// minimumProvers = func() uint64 { return uint64(1) } +// } + +// return NewStateMachine( +// id, +// &TestState{Round: 0, Hash: "genesis", Timestamp: time.Now()}, +// true, // shouldEmitReceiveEventsOnSends +// minimumProvers, +// &mockSyncProvider{syncDelay: 10 * time.Millisecond}, +// &mockVotingProvider{quorumSize: int(minimumProvers())}, +// &mockLeaderProvider{ +// isLeader: isLeader, +// leaders: leaders, +// proveDelay: 50 * time.Millisecond, +// }, +// &mockLivenessProvider{collectDelay: 10 * time.Millisecond}, +// nil, +// ) +// } + +// // Helper to wait for a specific state in transition history +// func waitForTransition(listener *MockTransitionListener, targetState State, timeout time.Duration) bool { +// deadline := time.Now().Add(timeout) +// for time.Now().Before(deadline) { +// transitions := listener.GetTransitions() +// for _, tr := range transitions { +// if tr.To == targetState { +// return true +// } +// } +// time.Sleep(10 * time.Millisecond) +// } +// return false +// } + +// // Helper to check if a state was reached in transition history +// func hasReachedState(listener *MockTransitionListener, targetState State) bool { +// transitions := listener.GetTransitions() +// for _, tr := range transitions { +// if tr.To == targetState { +// return true +// } +// } +// return false +// } + +// func TestStateMachineBasicTransitions(t *testing.T) { +// sm := createTestStateMachine("test-node", true) +// defer sm.Close() + +// // Initial state should be stopped +// if sm.GetState() != StateStopped { +// t.Errorf("Expected initial state to be %s, got %s", StateStopped, sm.GetState()) +// } + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// // Start the state machine +// err := sm.Start() +// if err != nil { +// t.Fatalf("Failed to start state machine: %v", err) +// } + +// time.Sleep(10 * time.Millisecond) + +// // Should transition to starting immediately +// if sm.GetState() != StateStarting { +// t.Errorf("Expected state to be %s after start, got %s", StateStarting, sm.GetState()) +// } + +// // Wait for automatic transitions +// if !waitForTransition(listener, StateLoading, 2*time.Second) { +// t.Fatalf("Failed to reach loading state") +// } + +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// // Verify the expected transition sequence +// transitions := listener.GetTransitions() +// expectedSequence := []State{StateStarting, StateLoading, StateCollecting} + +// for i, expected := range expectedSequence { +// if i >= len(transitions) { +// t.Errorf("Missing transition to %s", expected) +// continue +// } +// if transitions[i].To != expected { +// t.Errorf("Expected transition %d to be to %s, got %s", i, expected, transitions[i].To) +// } +// } +// } + +// func TestStateMachineLeaderFlow(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// // Start the machine +// err := sm.Start() +// if err != nil { +// t.Fatalf("Failed to start: %v", err) +// } + +// // Wait for the leader to progress through states +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// // Leader should reach proving state +// if !waitForTransition(listener, StateProving, 5*time.Second) { +// // Debug output if test fails +// transitions := listener.GetTransitions() +// t.Logf("Current state: %s", sm.GetState()) +// t.Logf("Total transitions: %d", len(transitions)) +// for _, tr := range transitions { +// t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) +// } +// t.Fatalf("Leader should have entered proving state") +// } + +// // Verify expected states were reached +// if !hasReachedState(listener, StateCollecting) { +// t.Error("Leader should have gone through collecting state") +// } +// if !hasReachedState(listener, StateLivenessCheck) { +// t.Error("Leader should have gone through liveness check state") +// } +// if !hasReachedState(listener, StateProving) { +// t.Error("Leader should have entered proving state") +// } +// } + +// func TestStateMachineExternalEvents(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// sm.Start() + +// // Wait for collecting state +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// // Send liveness check +// sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) + +// // Receive a proposal while collecting +// err := sm.ReceiveProposal(1, "external-leader", &TestState{ +// Round: 1, +// Hash: "external-hash", +// Timestamp: time.Now(), +// ProposalID: "external-proposal", +// }) +// if err != nil { +// t.Fatalf("Failed to receive proposal: %v", err) +// } + +// // Should transition to voting +// if !waitForTransition(listener, StateVoting, 4*time.Second) { +// t.Errorf("Expected to transition to voting after proposal") +// } + +// // Verify the transition happened +// if !hasReachedState(listener, StateVoting) { +// transitions := listener.GetTransitions() +// t.Logf("Total transitions: %d", len(transitions)) +// for _, tr := range transitions { +// t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) +// } +// t.Error("Should have transitioned to voting state") +// } +// } + +// func TestStateMachineVoting(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// sm.Start() + +// // Wait for collecting state +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// // Send liveness check +// sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) + +// // Send proposal to trigger voting +// sm.ReceiveProposal(1, "leader2", &TestState{ +// Round: 1, +// Hash: "test-hash", +// Timestamp: time.Now(), +// ProposalID: "test-proposal", +// }) + +// // Wait for voting state +// if !waitForTransition(listener, StateVoting, 2*time.Second) { +// t.Fatalf("Failed to reach voting state") +// } + +// // Add another vote to reach quorum +// err := sm.ReceiveVote("leader1", "leader2", &TestVote{ +// Round: 1, +// VoterID: "leader2", +// ProposalID: "test-proposal", +// Signature: "sig2", +// }) +// if err != nil { +// t.Fatalf("Failed to receive vote: %v", err) +// } + +// // Should eventually progress past voting (to finalizing, verifying, or back to collecting) +// time.Sleep(2 * time.Second) + +// // Check if we progressed past voting +// progressedPastVoting := hasReachedState(listener, StateFinalizing) || +// hasReachedState(listener, StateVerifying) || +// (hasReachedState(listener, StateCollecting) && len(listener.GetTransitions()) > 5) + +// if !progressedPastVoting { +// // If still stuck, try manual trigger +// sm.SendEvent(EventQuorumReached) +// time.Sleep(500 * time.Millisecond) + +// progressedPastVoting = hasReachedState(listener, StateFinalizing) || +// hasReachedState(listener, StateVerifying) || +// (hasReachedState(listener, StateCollecting) && len(listener.GetTransitions()) > 5) +// } + +// if !progressedPastVoting { +// transitions := listener.GetTransitions() +// t.Logf("Total transitions: %d", len(transitions)) +// for _, tr := range transitions { +// t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) +// } +// t.Errorf("Expected to progress past voting with quorum") +// } +// } + +// func TestStateMachineStop(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// sm.Start() + +// // Wait for any state beyond starting +// if !waitForTransition(listener, StateLoading, 2*time.Second) { +// t.Fatalf("State machine did not progress from starting") +// } + +// // Stop from any state +// err := sm.Stop() +// if err != nil { +// t.Fatalf("Failed to stop: %v", err) +// } + +// // Should transition to stopping +// if !waitForTransition(listener, StateStopping, 1*time.Second) { +// t.Errorf("Expected to transition to stopping state") +// } + +// // Should eventually reach stopped +// if !waitForTransition(listener, StateStopped, 3*time.Second) { +// // Try manual cleanup complete +// sm.SendEvent(EventCleanupComplete) +// time.Sleep(100 * time.Millisecond) +// } + +// // Verify we reached stopped state +// if !hasReachedState(listener, StateStopped) { +// transitions := listener.GetTransitions() +// t.Logf("Total transitions: %d", len(transitions)) +// for _, tr := range transitions { +// t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) +// } +// t.Errorf("Expected to reach stopped state") +// } +// } + +// func TestStateMachineLiveness(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// sm.Start() + +// // Wait for collecting state +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// // Wait for liveness check state +// if !waitForTransition(listener, StateLivenessCheck, 3*time.Second) { +// transitions := listener.GetTransitions() +// t.Logf("Total transitions: %d", len(transitions)) +// for _, tr := range transitions { +// t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) +// } +// t.Fatalf("Failed to reach liveness check state") +// } + +// // Receive liveness checks +// sm.ReceiveLivenessCheck("peer1", TestCollected{ +// Data: []byte("peer1-data"), +// Timestamp: time.Now(), +// }) + +// sm.ReceiveLivenessCheck("peer2", TestCollected{ +// Data: []byte("peer2-data"), +// Timestamp: time.Now(), +// }) + +// // Give it a moment to process +// time.Sleep(100 * time.Millisecond) + +// // Check that liveness data was stored +// sm.mu.RLock() +// livenessCount := len(sm.liveness) +// sm.mu.RUnlock() + +// // Should have at least 2 entries (or 3 if self-emit is counted) +// if livenessCount < 2 { +// t.Errorf("Expected at least 2 liveness entries, got %d", livenessCount) +// } +// } + +// func TestStateMachineMetrics(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// // Initial metrics +// if sm.GetTransitionCount() != 0 { +// t.Error("Expected initial transition count to be 0") +// } + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// // Make transitions +// sm.Start() + +// // Wait for a few transitions +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// if sm.GetTransitionCount() == 0 { +// t.Error("Expected transition count to be greater than 0") +// } + +// // Check state time +// stateTime := sm.GetStateTime() +// if stateTime < 0 { +// t.Errorf("Invalid state time: %v", stateTime) +// } +// } + +// func TestStateMachineConfirmations(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// sm.id = "leader1" +// defer sm.Close() + +// listener := &MockTransitionListener{} +// sm.AddListener(listener) + +// sm.Start() + +// // Progress to voting state via proposal +// if !waitForTransition(listener, StateCollecting, 3*time.Second) { +// t.Fatalf("Failed to reach collecting state") +// } + +// // Send liveness check +// sm.ReceiveLivenessCheck("leader2", TestCollected{Round: 1, Data: []byte("foo"), Timestamp: time.Now()}) + +// // Send proposal to get to voting +// sm.ReceiveProposal(1, "leader2", &TestState{ +// Round: 1, +// Hash: "test-hash", +// Timestamp: time.Now(), +// ProposalID: "test-proposal", +// }) + +// // Wait for voting +// if !waitForTransition(listener, StateVoting, 2*time.Second) { +// t.Fatalf("Failed to reach voting state") +// } + +// // Wait a bit for auto-progression or trigger manually +// time.Sleep(1 * time.Second) + +// // Try to progress to finalizing +// sm.SendEvent(EventVotingTimeout) +// time.Sleep(500 * time.Millisecond) + +// // Check if we reached a state that accepts confirmations +// currentState := sm.GetState() +// canAcceptConfirmation := currentState == StateFinalizing || currentState == StateVerifying + +// if !canAcceptConfirmation { +// // Check transition history +// if hasReachedState(listener, StateFinalizing) || hasReachedState(listener, StateVerifying) { +// // We passed through the state already, that's ok +// canAcceptConfirmation = true +// } else { +// transitions := listener.GetTransitions() +// t.Logf("Current state: %s", currentState) +// t.Logf("Total transitions: %d", len(transitions)) +// for _, tr := range transitions { +// t.Logf("Transition: %s -> %s [%s]", tr.From, tr.To, tr.Event) +// } +// // Don't fail - just skip the confirmation test +// t.Skip("Could not reach a state that accepts confirmations") +// } +// } + +// // Send confirmation (should only be accepted in finalizing or verifying) +// sm.ReceiveConfirmation("leader2", &TestState{ +// Round: 1, +// Hash: "confirmed-hash", +// Timestamp: time.Now(), +// ProposalID: "confirmed", +// }) + +// // Check that confirmation was stored +// sm.mu.RLock() +// confirmCount := len(sm.confirmations) +// sm.mu.RUnlock() + +// if confirmCount != 1 { +// t.Errorf("Expected 1 confirmation, got %d", confirmCount) +// } +// } + +// func TestStateMachineConcurrency(t *testing.T) { +// sm := createTestStateMachine("leader1", true) +// defer sm.Close() + +// sm.Start() +// time.Sleep(500 * time.Millisecond) + +// // Concurrent operations +// var wg sync.WaitGroup +// errChan := make(chan error, 5) + +// // Send multiple events concurrently +// for i := 0; i < 5; i++ { +// wg.Add(1) +// go func() { +// defer wg.Done() +// sm.SendEvent(EventSyncComplete) +// }() +// } + +// // Receive data concurrently +// for i := 0; i < 5; i++ { +// wg.Add(1) +// go func(id int) { +// defer wg.Done() +// peerID := TestPeerID(fmt.Sprintf("peer%d", id)) +// if err := sm.ReceiveLivenessCheck(peerID, TestCollected{ +// Data: []byte("data"), +// }); err != nil { +// errChan <- err +// } +// }(i) +// } + +// wg.Wait() +// close(errChan) + +// // Some errors are expected due to invalid state transitions +// errorCount := 0 +// for err := range errChan { +// if err != nil { +// errorCount++ +// } +// } + +// // As long as we didn't panic, concurrency is handled +// t.Logf("Concurrent operations completed with %d errors (expected)", errorCount) +// } + +// type mockPanickingVotingProvider struct { +// mu sync.Mutex +// quorumSize int +// sentProposals []*TestState +// sentVotes []*TestVote +// confirmations []*TestState +// } + +// func (m *mockPanickingVotingProvider) SendProposal(ctx context.Context, proposal *TestState) error { +// m.mu.Lock() +// defer m.mu.Unlock() +// m.sentProposals = append(m.sentProposals, proposal) +// return nil +// } + +// func (m *mockPanickingVotingProvider) DecideAndSendVote( +// ctx context.Context, +// proposals map[Identity]*TestState, +// ) (TestPeerID, *TestVote, error) { +// m.mu.Lock() +// defer m.mu.Unlock() + +// // Pick first proposal +// for peerID, proposal := range proposals { +// if proposal == nil { +// continue +// } +// vote := &TestVote{ +// VoterID: "leader1", +// ProposalID: proposal.ProposalID, +// Signature: "test-sig", +// } +// m.sentVotes = append(m.sentVotes, vote) +// return TestPeerID(peerID), vote, nil +// } + +// return "", nil, errors.New("no proposal to vote for") +// } + +// func (m *mockPanickingVotingProvider) IsQuorum(ctx context.Context, proposalVotes map[Identity]*TestVote) (bool, error) { +// totalVotes := 0 +// voteCount := map[string]int{} +// for _, votes := range proposalVotes { +// count, ok := voteCount[votes.ProposalID] +// if !ok { +// voteCount[votes.ProposalID] = 1 +// count = 1 +// } else { +// voteCount[votes.ProposalID] = count + 1 +// count = count + 1 +// } +// totalVotes += 1 + +// if count >= m.quorumSize { +// return true, nil +// } +// } +// if totalVotes >= m.quorumSize { +// return false, errors.New("split quorum") +// } +// return false, nil +// } + +// func (m *mockPanickingVotingProvider) FinalizeVotes( +// ctx context.Context, +// proposals map[Identity]*TestState, +// proposalVotes map[Identity]*TestVote, +// ) (*TestState, TestPeerID, error) { +// // Pick the proposal with the most votes +// winnerCount := 0 +// var winnerProposal *TestState = nil +// var winnerProposer TestPeerID +// voteCount := map[string]int{} +// for _, votes := range proposalVotes { +// count, ok := voteCount[votes.ProposalID] +// if !ok { +// voteCount[votes.ProposalID] = 1 +// count = 1 +// } else { +// voteCount[votes.ProposalID] = count + 1 +// count += 1 +// } +// } +// for peerID, proposal := range proposals { +// if proposal == nil { +// continue +// } +// if _, ok := voteCount[proposal.ProposalID]; !ok { +// continue +// } +// if voteCount[proposal.ProposalID] > winnerCount { +// winnerCount = voteCount[proposal.ProposalID] +// winnerProposal = proposal +// winnerProposer = TestPeerID(peerID) +// } +// } + +// if winnerProposal != nil { +// // Create new state with incremented round +// newState := &TestState{ +// Round: winnerProposal.Round + 1, +// Hash: "hash-" + fmt.Sprintf("%d", winnerProposal.Round+1), +// Timestamp: time.Now(), +// ProposalID: "finalized", +// } +// return newState, winnerProposer, nil +// } + +// // Default to first proposal +// for peerID, proposal := range proposals { +// if proposal == nil { +// continue +// } +// newState := &TestState{ +// Round: proposal.Round + 1, +// Hash: "hash-" + fmt.Sprintf("%d", proposal.Round+1), +// Timestamp: time.Now(), +// ProposalID: "finalized", +// } +// return newState, TestPeerID(peerID), nil +// } + +// return nil, "", nil +// } + +// func (m *mockPanickingVotingProvider) SendVote(ctx context.Context, vote *TestVote) (TestPeerID, error) { +// return "", nil +// } + +// func (m *mockPanickingVotingProvider) SendConfirmation(ctx context.Context, finalized *TestState) error { +// panic("PANIC HERE") +// } + +// type printtracer struct{} + +// // Error implements TraceLogger. +// func (p *printtracer) Error(message string, err error) { +// fmt.Println("[error]", message, err) +// } + +// // Trace implements TraceLogger. +// func (p *printtracer) Trace(message string) { +// fmt.Println("[trace]", message) +// } + +// func TestStateMachinePanicRecovery(t *testing.T) { +// minimumProvers := func() uint64 { return uint64(1) } + +// sm := NewStateMachine( +// "leader1", +// &TestState{Round: 0, Hash: "genesis", Timestamp: time.Now()}, +// true, // shouldEmitReceiveEventsOnSends +// minimumProvers, +// &mockSyncProvider{syncDelay: 10 * time.Millisecond}, +// &mockPanickingVotingProvider{quorumSize: 1}, +// &mockLeaderProvider{ +// isLeader: true, +// leaders: []TestPeerID{"leader1"}, +// proveDelay: 50 * time.Millisecond, +// }, +// &mockLivenessProvider{collectDelay: 10 * time.Millisecond}, +// &printtracer{}, +// ) +// defer sm.Close() + +// sm.Start() +// time.Sleep(10 * time.Second) +// sm.mu.Lock() +// if sm.machineState != StateStopped { +// sm.mu.Unlock() +// t.FailNow() +// } +// sm.mu.Unlock() + +// } diff --git a/consensus/state_machine_viz.go b/consensus/state_machine_viz.go index 0634589..9285150 100644 --- a/consensus/state_machine_viz.go +++ b/consensus/state_machine_viz.go @@ -1,360 +1,360 @@ package consensus -import ( - "fmt" - "strings" - "time" -) +// import ( +// "fmt" +// "strings" +// "time" +// ) -// StateMachineViz provides visualization utilities for the generic state machine -type StateMachineViz[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -] struct { - sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT] -} +// // StateMachineViz provides visualization utilities for the generic state machine +// type StateMachineViz[ +// StateT Unique, +// VoteT Unique, +// PeerIDT Unique, +// CollectedT Unique, +// ] struct { +// sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT] +// } -// NewStateMachineViz creates a new visualizer for the generic state machine -func NewStateMachineViz[ - StateT Unique, - VoteT Unique, - PeerIDT Unique, - CollectedT Unique, -]( - sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT], -) *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT] { - return &StateMachineViz[StateT, VoteT, PeerIDT, CollectedT]{sm: sm} -} +// // NewStateMachineViz creates a new visualizer for the generic state machine +// func NewStateMachineViz[ +// StateT Unique, +// VoteT Unique, +// PeerIDT Unique, +// CollectedT Unique, +// ]( +// sm *StateMachine[StateT, VoteT, PeerIDT, CollectedT], +// ) *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT] { +// return &StateMachineViz[StateT, VoteT, PeerIDT, CollectedT]{sm: sm} +// } -// GenerateMermaidDiagram generates a Mermaid diagram of the state machine -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateMermaidDiagram() string { - var sb strings.Builder +// // GenerateMermaidDiagram generates a Mermaid diagram of the state machine +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], +// ) GenerateMermaidDiagram() string { +// var sb strings.Builder - sb.WriteString("```mermaid\n") - sb.WriteString("stateDiagram-v2\n") - sb.WriteString(" [*] --> Stopped\n") +// sb.WriteString("```mermaid\n") +// sb.WriteString("stateDiagram-v2\n") +// sb.WriteString(" [*] --> Stopped\n") - // Define states with descriptions - // Use CamelCase for state IDs to avoid underscore issues - stateMap := map[State]string{ - StateStopped: "Stopped", - StateStarting: "Starting", - StateLoading: "Loading", - StateCollecting: "Collecting", - StateLivenessCheck: "LivenessCheck", - StateProving: "Proving", - StatePublishing: "Publishing", - StateVoting: "Voting", - StateFinalizing: "Finalizing", - StateVerifying: "Verifying", - StateStopping: "Stopping", - } +// // Define states with descriptions +// // Use CamelCase for state IDs to avoid underscore issues +// stateMap := map[State]string{ +// StateStopped: "Stopped", +// StateStarting: "Starting", +// StateLoading: "Loading", +// StateCollecting: "Collecting", +// StateLivenessCheck: "LivenessCheck", +// StateProving: "Proving", +// StatePublishing: "Publishing", +// StateVoting: "Voting", +// StateFinalizing: "Finalizing", +// StateVerifying: "Verifying", +// StateStopping: "Stopping", +// } - stateDescriptions := map[State]string{ - StateStopped: "Engine not running", - StateStarting: "Initializing components", - StateLoading: "Syncing with network", - StateCollecting: "Gathering consensus data", - StateLivenessCheck: "Checking prover availability", - StateProving: "Generating cryptographic proof", - StatePublishing: "Broadcasting proposal", - StateVoting: "Participating in consensus", - StateFinalizing: "Aggregating votes", - StateVerifying: "Publishing confirmation", - StateStopping: "Cleaning up resources", - } +// stateDescriptions := map[State]string{ +// StateStopped: "Engine not running", +// StateStarting: "Initializing components", +// StateLoading: "Syncing with network", +// StateCollecting: "Gathering consensus data", +// StateLivenessCheck: "Checking prover availability", +// StateProving: "Generating cryptographic proof", +// StatePublishing: "Broadcasting proposal", +// StateVoting: "Participating in consensus", +// StateFinalizing: "Aggregating votes", +// StateVerifying: "Publishing confirmation", +// StateStopping: "Cleaning up resources", +// } - // Add state descriptions - for state, id := range stateMap { - desc := stateDescriptions[state] - sb.WriteString(fmt.Sprintf(" %s : %s\n", id, desc)) - } +// // Add state descriptions +// for state, id := range stateMap { +// desc := stateDescriptions[state] +// sb.WriteString(fmt.Sprintf(" %s : %s\n", id, desc)) +// } - sb.WriteString("\n") +// sb.WriteString("\n") - // Add transitions using mapped state names - transitions := v.getTransitionList() - for _, t := range transitions { - fromID := stateMap[t.From] - toID := stateMap[t.To] - if t.Guard != nil { - sb.WriteString(fmt.Sprintf( - " %s --> %s : %s [guarded]\n", - fromID, toID, t.Event)) - } else { - sb.WriteString(fmt.Sprintf( - " %s --> %s : %s\n", - fromID, toID, t.Event)) - } - } +// // Add transitions using mapped state names +// transitions := v.getTransitionList() +// for _, t := range transitions { +// fromID := stateMap[t.From] +// toID := stateMap[t.To] +// if t.Guard != nil { +// sb.WriteString(fmt.Sprintf( +// " %s --> %s : %s [guarded]\n", +// fromID, toID, t.Event)) +// } else { +// sb.WriteString(fmt.Sprintf( +// " %s --> %s : %s\n", +// fromID, toID, t.Event)) +// } +// } - // Add special annotations using mapped names - sb.WriteString("\n") - sb.WriteString(" note right of Proving : Leader only\n") - sb.WriteString( - " note right of LivenessCheck : Divergence point\\nfor leader/non-leader\n", - ) - sb.WriteString(" note right of Voting : Convergence point\n") +// // Add special annotations using mapped names +// sb.WriteString("\n") +// sb.WriteString(" note right of Proving : Leader only\n") +// sb.WriteString( +// " note right of LivenessCheck : Divergence point\\nfor leader/non-leader\n", +// ) +// sb.WriteString(" note right of Voting : Convergence point\n") - sb.WriteString("```\n") +// sb.WriteString("```\n") - return sb.String() -} +// return sb.String() +// } -// GenerateDotDiagram generates a Graphviz DOT diagram -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateDotDiagram() string { - var sb strings.Builder +// // GenerateDotDiagram generates a Graphviz DOT diagram +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], +// ) GenerateDotDiagram() string { +// var sb strings.Builder - sb.WriteString("digraph ConsensusStateMachine {\n") - sb.WriteString(" rankdir=TB;\n") - sb.WriteString(" node [shape=box, style=rounded];\n") - sb.WriteString(" edge [fontsize=10];\n\n") +// sb.WriteString("digraph ConsensusStateMachine {\n") +// sb.WriteString(" rankdir=TB;\n") +// sb.WriteString(" node [shape=box, style=rounded];\n") +// sb.WriteString(" edge [fontsize=10];\n\n") - // Define node styles - sb.WriteString(" // State styles\n") - sb.WriteString( - " Stopped [style=\"rounded,filled\", fillcolor=lightgray];\n", - ) - sb.WriteString( - " Starting [style=\"rounded,filled\", fillcolor=lightyellow];\n", - ) - sb.WriteString( - " Loading [style=\"rounded,filled\", fillcolor=lightyellow];\n", - ) - sb.WriteString( - " Collecting [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " LivenessCheck [style=\"rounded,filled\", fillcolor=orange];\n", - ) - sb.WriteString( - " Proving [style=\"rounded,filled\", fillcolor=lightgreen];\n", - ) - sb.WriteString( - " Publishing [style=\"rounded,filled\", fillcolor=lightgreen];\n", - ) - sb.WriteString( - " Voting [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " Finalizing [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " Verifying [style=\"rounded,filled\", fillcolor=lightblue];\n", - ) - sb.WriteString( - " Stopping [style=\"rounded,filled\", fillcolor=lightcoral];\n\n", - ) +// // Define node styles +// sb.WriteString(" // State styles\n") +// sb.WriteString( +// " Stopped [style=\"rounded,filled\", fillcolor=lightgray];\n", +// ) +// sb.WriteString( +// " Starting [style=\"rounded,filled\", fillcolor=lightyellow];\n", +// ) +// sb.WriteString( +// " Loading [style=\"rounded,filled\", fillcolor=lightyellow];\n", +// ) +// sb.WriteString( +// " Collecting [style=\"rounded,filled\", fillcolor=lightblue];\n", +// ) +// sb.WriteString( +// " LivenessCheck [style=\"rounded,filled\", fillcolor=orange];\n", +// ) +// sb.WriteString( +// " Proving [style=\"rounded,filled\", fillcolor=lightgreen];\n", +// ) +// sb.WriteString( +// " Publishing [style=\"rounded,filled\", fillcolor=lightgreen];\n", +// ) +// sb.WriteString( +// " Voting [style=\"rounded,filled\", fillcolor=lightblue];\n", +// ) +// sb.WriteString( +// " Finalizing [style=\"rounded,filled\", fillcolor=lightblue];\n", +// ) +// sb.WriteString( +// " Verifying [style=\"rounded,filled\", fillcolor=lightblue];\n", +// ) +// sb.WriteString( +// " Stopping [style=\"rounded,filled\", fillcolor=lightcoral];\n\n", +// ) - // Add transitions - sb.WriteString(" // Transitions\n") - transitions := v.getTransitionList() - for _, t := range transitions { - label := string(t.Event) - if t.Guard != nil { - label += " [G]" - } - sb.WriteString(fmt.Sprintf( - " %s -> %s [label=\"%s\"];\n", - t.From, t.To, label)) - } +// // Add transitions +// sb.WriteString(" // Transitions\n") +// transitions := v.getTransitionList() +// for _, t := range transitions { +// label := string(t.Event) +// if t.Guard != nil { +// label += " [G]" +// } +// sb.WriteString(fmt.Sprintf( +// " %s -> %s [label=\"%s\"];\n", +// t.From, t.To, label)) +// } - // Add legend - sb.WriteString("\n // Legend\n") - sb.WriteString(" subgraph cluster_legend {\n") - sb.WriteString(" label=\"Legend\";\n") - sb.WriteString(" style=dotted;\n") - sb.WriteString(" \"[G] = Guarded transition\" [shape=none];\n") - sb.WriteString(" \"Yellow = Initialization\" [shape=none];\n") - sb.WriteString(" \"Blue = Consensus flow\" [shape=none];\n") - sb.WriteString(" \"Green = Leader specific\" [shape=none];\n") - sb.WriteString(" \"Orange = Decision point\" [shape=none];\n") - sb.WriteString(" }\n") +// // Add legend +// sb.WriteString("\n // Legend\n") +// sb.WriteString(" subgraph cluster_legend {\n") +// sb.WriteString(" label=\"Legend\";\n") +// sb.WriteString(" style=dotted;\n") +// sb.WriteString(" \"[G] = Guarded transition\" [shape=none];\n") +// sb.WriteString(" \"Yellow = Initialization\" [shape=none];\n") +// sb.WriteString(" \"Blue = Consensus flow\" [shape=none];\n") +// sb.WriteString(" \"Green = Leader specific\" [shape=none];\n") +// sb.WriteString(" \"Orange = Decision point\" [shape=none];\n") +// sb.WriteString(" }\n") - sb.WriteString("}\n") +// sb.WriteString("}\n") - return sb.String() -} +// return sb.String() +// } -// GenerateTransitionTable generates a markdown table of all transitions -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateTransitionTable() string { - var sb strings.Builder +// // GenerateTransitionTable generates a markdown table of all transitions +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], +// ) GenerateTransitionTable() string { +// var sb strings.Builder - sb.WriteString("| From State | Event | To State | Condition |\n") - sb.WriteString("|------------|-------|----------|----------|\n") +// sb.WriteString("| From State | Event | To State | Condition |\n") +// sb.WriteString("|------------|-------|----------|----------|\n") - transitions := v.getTransitionList() - for _, t := range transitions { - condition := "None" - if t.Guard != nil { - condition = "Has guard" - } - sb.WriteString(fmt.Sprintf( - "| %s | %s | %s | %s |\n", - t.From, t.Event, t.To, condition)) - } +// transitions := v.getTransitionList() +// for _, t := range transitions { +// condition := "None" +// if t.Guard != nil { +// condition = "Has guard" +// } +// sb.WriteString(fmt.Sprintf( +// "| %s | %s | %s | %s |\n", +// t.From, t.Event, t.To, condition)) +// } - return sb.String() -} +// return sb.String() +// } -// getTransitionList extracts all transitions from the state machine -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) getTransitionList() []*Transition[StateT, VoteT, PeerIDT, CollectedT] { - var transitions []*Transition[StateT, VoteT, PeerIDT, CollectedT] +// // getTransitionList extracts all transitions from the state machine +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], +// ) getTransitionList() []*Transition[StateT, VoteT, PeerIDT, CollectedT] { +// var transitions []*Transition[StateT, VoteT, PeerIDT, CollectedT] - v.sm.mu.RLock() - defer v.sm.mu.RUnlock() +// v.sm.mu.RLock() +// defer v.sm.mu.RUnlock() - for _, eventMap := range v.sm.transitions { - for _, transition := range eventMap { - transitions = append(transitions, transition) - } - } +// for _, eventMap := range v.sm.transitions { +// for _, transition := range eventMap { +// transitions = append(transitions, transition) +// } +// } - return transitions -} +// return transitions +// } -// GetStateStats returns statistics about the state machine -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GetStateStats() string { - var sb strings.Builder +// // GetStateStats returns statistics about the state machine +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], +// ) GetStateStats() string { +// var sb strings.Builder - sb.WriteString("State Machine Statistics:\n") - sb.WriteString("========================\n\n") +// sb.WriteString("State Machine Statistics:\n") +// sb.WriteString("========================\n\n") - v.sm.mu.RLock() - defer v.sm.mu.RUnlock() +// v.sm.mu.RLock() +// defer v.sm.mu.RUnlock() - // Count states and transitions - stateCount := 0 - transitionCount := 0 - eventCount := make(map[Event]int) +// // Count states and transitions +// stateCount := 0 +// transitionCount := 0 +// eventCount := make(map[Event]int) - for _, eventMap := range v.sm.transitions { - // Only count if we have transitions for this state - if len(eventMap) > 0 { - stateCount++ - } - for event := range eventMap { - transitionCount++ - eventCount[event]++ - } - } +// for _, eventMap := range v.sm.transitions { +// // Only count if we have transitions for this state +// if len(eventMap) > 0 { +// stateCount++ +// } +// for event := range eventMap { +// transitionCount++ +// eventCount[event]++ +// } +// } - sb.WriteString(fmt.Sprintf("Total States: %d\n", stateCount)) - sb.WriteString(fmt.Sprintf("Total Transitions: %d\n", transitionCount)) - sb.WriteString(fmt.Sprintf("Current State: %s\n", v.sm.machineState)) - sb.WriteString(fmt.Sprintf("Transitions Made: %d\n", v.sm.transitionCount)) - sb.WriteString( - fmt.Sprintf("Time in Current State: %v\n", v.sm.GetStateTime()), - ) +// sb.WriteString(fmt.Sprintf("Total States: %d\n", stateCount)) +// sb.WriteString(fmt.Sprintf("Total Transitions: %d\n", transitionCount)) +// sb.WriteString(fmt.Sprintf("Current State: %s\n", v.sm.machineState)) +// sb.WriteString(fmt.Sprintf("Transitions Made: %d\n", v.sm.transitionCount)) +// sb.WriteString( +// fmt.Sprintf("Time in Current State: %v\n", v.sm.GetStateTime()), +// ) - // Display current leader info if available - if len(v.sm.nextProvers) > 0 { - sb.WriteString("\nNext Leaders:\n") - for i, leader := range v.sm.nextProvers { - sb.WriteString(fmt.Sprintf(" %d. %v\n", i+1, leader)) - } - } +// // Display current leader info if available +// if len(v.sm.nextProvers) > 0 { +// sb.WriteString("\nNext Leaders:\n") +// for i, leader := range v.sm.nextProvers { +// sb.WriteString(fmt.Sprintf(" %d. %v\n", i+1, leader)) +// } +// } - // Display active state info - if v.sm.activeState != nil { - sb.WriteString(fmt.Sprintf("\nActive State: %+v\n", v.sm.activeState)) - } +// // Display active state info +// if v.sm.activeState != nil { +// sb.WriteString(fmt.Sprintf("\nActive State: %+v\n", v.sm.activeState)) +// } - // Display liveness info - sb.WriteString(fmt.Sprintf("\nLiveness Checks: %d\n", len(v.sm.liveness))) +// // Display liveness info +// sb.WriteString(fmt.Sprintf("\nLiveness Checks: %d\n", len(v.sm.liveness))) - // Display voting info - sb.WriteString(fmt.Sprintf("Proposals: %d\n", len(v.sm.proposals))) - sb.WriteString(fmt.Sprintf("Votes: %d\n", len(v.sm.votes))) - sb.WriteString(fmt.Sprintf("Confirmations: %d\n", len(v.sm.confirmations))) +// // Display voting info +// sb.WriteString(fmt.Sprintf("Proposals: %d\n", len(v.sm.proposals))) +// sb.WriteString(fmt.Sprintf("Votes: %d\n", len(v.sm.votes))) +// sb.WriteString(fmt.Sprintf("Confirmations: %d\n", len(v.sm.confirmations))) - sb.WriteString("\nEvent Usage:\n") - for event, count := range eventCount { - sb.WriteString(fmt.Sprintf(" %s: %d transitions\n", event, count)) - } +// sb.WriteString("\nEvent Usage:\n") +// for event, count := range eventCount { +// sb.WriteString(fmt.Sprintf(" %s: %d transitions\n", event, count)) +// } - return sb.String() -} +// return sb.String() +// } -// GetCurrentStateInfo returns detailed information about the current state -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT]) GetCurrentStateInfo() string { - v.sm.mu.RLock() - defer v.sm.mu.RUnlock() +// // GetCurrentStateInfo returns detailed information about the current state +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT]) GetCurrentStateInfo() string { +// v.sm.mu.RLock() +// defer v.sm.mu.RUnlock() - var sb strings.Builder +// var sb strings.Builder - sb.WriteString("Current State Information:\n") - sb.WriteString("=========================\n\n") - sb.WriteString(fmt.Sprintf("State: %s\n", v.sm.machineState)) - sb.WriteString( - fmt.Sprintf("Time in State: %v\n", time.Since(v.sm.stateStartTime)), - ) - sb.WriteString(fmt.Sprintf("Total Transitions: %d\n", v.sm.transitionCount)) +// sb.WriteString("Current State Information:\n") +// sb.WriteString("=========================\n\n") +// sb.WriteString(fmt.Sprintf("State: %s\n", v.sm.machineState)) +// sb.WriteString( +// fmt.Sprintf("Time in State: %v\n", time.Since(v.sm.stateStartTime)), +// ) +// sb.WriteString(fmt.Sprintf("Total Transitions: %d\n", v.sm.transitionCount)) - // State configuration info - if config, exists := v.sm.stateConfigs[v.sm.machineState]; exists { - sb.WriteString("\nState Configuration:\n") - if config.Timeout > 0 { - sb.WriteString(fmt.Sprintf(" Timeout: %v\n", config.Timeout)) - sb.WriteString(fmt.Sprintf(" Timeout Event: %s\n", config.OnTimeout)) - } - if config.Behavior != nil { - sb.WriteString(" Has Behavior: Yes\n") - } - if config.OnEnter != nil { - sb.WriteString(" Has OnEnter Callback: Yes\n") - } - if config.OnExit != nil { - sb.WriteString(" Has OnExit Callback: Yes\n") - } - } +// // State configuration info +// if config, exists := v.sm.stateConfigs[v.sm.machineState]; exists { +// sb.WriteString("\nState Configuration:\n") +// if config.Timeout > 0 { +// sb.WriteString(fmt.Sprintf(" Timeout: %v\n", config.Timeout)) +// sb.WriteString(fmt.Sprintf(" Timeout Event: %s\n", config.OnTimeout)) +// } +// if config.Behavior != nil { +// sb.WriteString(" Has Behavior: Yes\n") +// } +// if config.OnEnter != nil { +// sb.WriteString(" Has OnEnter Callback: Yes\n") +// } +// if config.OnExit != nil { +// sb.WriteString(" Has OnExit Callback: Yes\n") +// } +// } - // Available transitions from current state - sb.WriteString("\nAvailable Transitions:\n") - if transitions, exists := v.sm.transitions[v.sm.machineState]; exists { - for event, transition := range transitions { - guardStr := "" - if transition.Guard != nil { - guardStr = " [guarded]" - } - sb.WriteString( - fmt.Sprintf(" %s -> %s%s\n", event, transition.To, guardStr), - ) - } - } +// // Available transitions from current state +// sb.WriteString("\nAvailable Transitions:\n") +// if transitions, exists := v.sm.transitions[v.sm.machineState]; exists { +// for event, transition := range transitions { +// guardStr := "" +// if transition.Guard != nil { +// guardStr = " [guarded]" +// } +// sb.WriteString( +// fmt.Sprintf(" %s -> %s%s\n", event, transition.To, guardStr), +// ) +// } +// } - return sb.String() -} +// return sb.String() +// } -// GenerateEventFlow generates a flow of events that occurred -func ( - v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], -) GenerateEventFlow() string { - var sb strings.Builder +// // GenerateEventFlow generates a flow of events that occurred +// func ( +// v *StateMachineViz[StateT, VoteT, PeerIDT, CollectedT], +// ) GenerateEventFlow() string { +// var sb strings.Builder - sb.WriteString("Event Flow:\n") - sb.WriteString("===========\n\n") +// sb.WriteString("Event Flow:\n") +// sb.WriteString("===========\n\n") - transitions := v.getTransitionList() - for i, tr := range transitions { - sb.WriteString(fmt.Sprintf( - "%d. %s -> %s [%s]\n", - i+1, tr.From, tr.To, tr.Event, - )) - } +// transitions := v.getTransitionList() +// for i, tr := range transitions { +// sb.WriteString(fmt.Sprintf( +// "%d. %s -> %s [%s]\n", +// i+1, tr.From, tr.To, tr.Event, +// )) +// } - return sb.String() -} +// return sb.String() +// } diff --git a/consensus/stateproducer/safety_rules_wrapper.go b/consensus/stateproducer/safety_rules_wrapper.go new file mode 100644 index 0000000..5ee7256 --- /dev/null +++ b/consensus/stateproducer/safety_rules_wrapper.go @@ -0,0 +1,128 @@ +package stateproducer + +import ( + "fmt" + + "go.uber.org/atomic" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// safetyRulesConcurrencyWrapper wraps `consensus.SafetyRules` to allow its +// usage in concurrent environments. +// Correctness requirements: +// +// (i) The wrapper's Sign function is called exactly once (wrapper errors on +// repeated Sign calls) +// (ii) SafetyRules is not accessed outside the wrapper concurrently. The +// wrapper cannot enforce this. +// +// The correctness condition (ii) holds because there is a single dedicated +// thread executing the Event Loop, including the EventHandler, that also runs +// the logic of `StateProducer.MakeStateProposal`. +// +// Concurrency safety: +// +// (a) There is one dedicated thread executing the Event Loop, including the +// EventHandler, that also runs the logic of +// `StateProducer.MakeStateProposal`. Hence, while the 'Event Loop Thread' +// is in `MakeStateProposal`, we are guaranteed the only interactions with +// `SafetyRules` are in `consensus.LeaderProvider.BuildOn` +// (b) The Event Loop Thread instantiates the variable `signingStatus`. +// Furthermore, the `signer` call first reads `signingStatus`. Therefore, +// all operations in the EventHandler prior to calling +// `Builder.BuildOn(..)` happen before the call to `signer`. Hence, it is +// guaranteed that the `signer` uses the most recent state of +// `SafetyRules`, even if `Sign` is executed by a different thread. +// (c) Just before the `signer` call returns, it writes `signingStatus`. +// Furthermore, the Event Loop Thread reads `signingStatus` right after the +// `Builder.BuildOn(..)` call returns. Thereby, Event Loop Thread sees the +// most recent state of `SafetyRules` after completing the signing +// operation. +// +// With the transitivity of the 'Happens Before' relationship (-> go Memory +// Model https://go.dev/ref/mem#atomic), we have proven that concurrent access +// of the wrapped `safetyRules` is safe for the state transition: +// +// instantiate signingStatus to 0 ─► update signingStatus from 0 to 1 → signer → update signingStatus from 1 to 2 ─► confirm signingStatus has value 2 +// +// ╰──────────────┬───────────────╯ ╰──────────────────────────────────────┬─────────────────────────────────────╯ ╰────────────────┬────────────────╯ +// +// Event Loop Thread within the scope of Builder.BuildOn Event Loop Thread +// +// All state transitions _other_ than the one above yield exceptions without +// modifying `SafetyRules`. +type safetyRulesConcurrencyWrapper[ + StateT models.Unique, + VoteT models.Unique, +] struct { + // signingStatus guarantees concurrency safety and encodes the progress of the + // signing process. We differentiate between 4 different states: + // - value 0: signing is not yet started + // - value 1: one thread has already entered the signing process, which is + // currently ongoing + // - value 2: the thread that set `signingStatus` to value 1 has completed + // the signing + signingStatus atomic.Uint32 + safetyRules consensus.SafetyRules[StateT, VoteT] +} + +func newSafetyRulesConcurrencyWrapper[ + StateT models.Unique, + VoteT models.Unique, +]( + safetyRules consensus.SafetyRules[StateT, VoteT], +) *safetyRulesConcurrencyWrapper[StateT, VoteT] { + return &safetyRulesConcurrencyWrapper[StateT, VoteT]{safetyRules: safetyRules} +} + +// Sign modifies the given unsignedHeader by including the proposer's signature +// date. Safe under concurrent calls. Per convention, this method should be +// called exactly once. Only the first call will succeed, and subsequent calls +// error. The implementation is backed by `SafetyRules` and thereby guarantees +// consensus safety for singing state proposals. +// Error Returns: +// - models.NoVoteError if it is not safe for us to vote (our proposal +// includes our vote) for this rank. This can happen if we have already +// proposed or timed out this rank. +// - generic error in case of unexpected failure +func (w *safetyRulesConcurrencyWrapper[StateT, VoteT]) Sign( + unsigned *models.Proposal[StateT], +) (*VoteT, error) { + // value of `signingStatus` is something else than 0 + if !w.signingStatus.CompareAndSwap(0, 1) { + return nil, fmt.Errorf( + "signer has already commenced signing; possibly repeated signer call", + ) + } + + // signer is now in state 1, and this thread is the only one every going to + // execute the following logic + + // signature for own state is structurally a vote + vote, err := w.safetyRules.SignOwnProposal(unsigned) + if err != nil { + return nil, fmt.Errorf("could not sign state proposal: %w", err) + } + // value of `signingStatus` is always 1, i.e. the following check always + // succeeds. + if !w.signingStatus.CompareAndSwap(1, 2) { + // sanity check protects logic from future modifications accidentally + // breaking this invariant + panic( + "signer wrapper completed its work but encountered state other than 1", + ) // never happens + } + return vote, nil +} + +// IsSigningComplete atomically checks whether the Sign logic has concluded, and +// returns true only in this case. By reading the atomic `signingStatus` and +// confirming it has the expected value, it is guaranteed that any state changes +// of `safetyRules` that happened within `Sign` are visible to the Event Loop +// Thread. No errors expected during normal operations +func ( + w *safetyRulesConcurrencyWrapper[StateT, VoteT], +) IsSigningComplete() bool { + return w.signingStatus.Load() == 2 +} diff --git a/consensus/stateproducer/state_producer.go b/consensus/stateproducer/state_producer.go new file mode 100644 index 0000000..f3fbca7 --- /dev/null +++ b/consensus/stateproducer/state_producer.go @@ -0,0 +1,137 @@ +package stateproducer + +import ( + "context" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// StateProducer is responsible for producing new state proposals. It is a +// ervice component to HotStuff's main state machine (implemented in the +// EventHandler). The StateProducer's central purpose is to mediate concurrent +// signing requests to its embedded `consensus.SafetyRules` during state +// production. The actual work of producing a state proposal is delegated to the +// embedded `consensus.LeaderProvider`. +type StateProducer[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +] struct { + safetyRules consensus.SafetyRules[StateT, VoteT] + committee consensus.Replicas + builder consensus.LeaderProvider[StateT, PeerIDT, CollectedT] +} + +var _ consensus.StateProducer[*nilUnique, *nilUnique] = (*StateProducer[*nilUnique, *nilUnique, *nilUnique, *nilUnique])(nil) + +// New creates a new StateProducer, which mediates concurrent signing requests +// to the embedded `consensus.SafetyRules` during state production, delegated to +// `consensus.LeaderProvider`. No errors are expected during normal operation. +func NewStateProducer[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, + CollectedT models.Unique, +]( + safetyRules consensus.SafetyRules[StateT, VoteT], + committee consensus.Replicas, + builder consensus.LeaderProvider[StateT, PeerIDT, CollectedT], +) (*StateProducer[StateT, VoteT, PeerIDT, CollectedT], error) { + bp := &StateProducer[StateT, VoteT, PeerIDT, CollectedT]{ + safetyRules: safetyRules, + committee: committee, + builder: builder, + } + return bp, nil +} + +// MakeStateProposal builds a new HotStuff state proposal using the given rank, +// the given quorum certificate for its parent and [optionally] a timeout +// certificate for last rank(could be nil). +// Error Returns: +// - models.NoVoteError if it is not safe for us to vote (our proposal +// includes our vote) for this rank. This can happen if we have already +// proposed or timed out this rank. +// - generic error in case of unexpected failure +func (bp *StateProducer[StateT, VoteT, PeerIDT, CollectedT]) MakeStateProposal( + rank uint64, + qc models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) (*models.SignedProposal[StateT, VoteT], error) { + newState, err := bp.builder.ProveNextState( + context.TODO(), + qc.GetFilter(), + qc.GetSelector(), + ) + if err != nil { + if models.IsNoVoteError(err) { + return nil, fmt.Errorf( + "unsafe to vote for own proposal on top of %x: %w", + qc.GetSelector(), + err, + ) + } + return nil, fmt.Errorf( + "could not build state proposal on top of %v: %w", + qc.GetSelector(), + err, + ) + } + + proposal := models.ProposalFrom( + models.StateFrom(newState, qc), + previousRankTimeoutCert, + ) + + signer := newSafetyRulesConcurrencyWrapper(bp.safetyRules) + vote, err := signer.Sign(proposal) + if err != nil { + return nil, fmt.Errorf( + "could not vote on state proposal on top of %v: %w", + qc.GetSelector(), + err, + ) + } + + signedProposal := models.SignedProposalFromState(proposal, vote) + + return signedProposal, nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/timeoutaggregator/timeout_aggregator.go b/consensus/timeoutaggregator/timeout_aggregator.go new file mode 100644 index 0000000..7d5a084 --- /dev/null +++ b/consensus/timeoutaggregator/timeout_aggregator.go @@ -0,0 +1,247 @@ +package timeoutaggregator + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// defaultTimeoutAggregatorWorkers number of workers to dispatch events for +// timeout aggregator +const defaultTimeoutAggregatorWorkers = 4 + +// defaultTimeoutQueueCapacity maximum capacity for buffering unprocessed +// timeouts +const defaultTimeoutQueueCapacity = 1000 + +// TimeoutAggregator stores the timeout states and aggregates them into a TC +// when enough TSs have been collected. It's safe to use in concurrent +// environment. +type TimeoutAggregator[VoteT models.Unique] struct { + tracer consensus.TraceLogger + lowestRetainedRank atomic.Uint64 + collectors consensus.TimeoutCollectors[VoteT] + queuedTimeoutsNotifier chan struct{} + enteringRankNotifier chan struct{} + queuedTimeouts chan *models.TimeoutState[VoteT] + wg sync.WaitGroup +} + +var _ consensus.TimeoutAggregator[*nilUnique] = (*TimeoutAggregator[*nilUnique])(nil) + +// NewTimeoutAggregator creates an instance of timeout aggregator. +// No errors are expected during normal operations. +func NewTimeoutAggregator[VoteT models.Unique]( + tracer consensus.TraceLogger, + lowestRetainedRank uint64, + collectors consensus.TimeoutCollectors[VoteT], +) (*TimeoutAggregator[VoteT], error) { + queuedTimeouts := make( + chan *models.TimeoutState[VoteT], + defaultTimeoutQueueCapacity, + ) + + aggregator := &TimeoutAggregator[VoteT]{ + tracer: tracer, + lowestRetainedRank: atomic.Uint64{}, + collectors: collectors, + queuedTimeoutsNotifier: make(chan struct{}, 1), + enteringRankNotifier: make(chan struct{}, 1), + queuedTimeouts: queuedTimeouts, + wg: sync.WaitGroup{}, + } + + aggregator.lowestRetainedRank.Store(lowestRetainedRank) + aggregator.wg.Add(defaultTimeoutAggregatorWorkers + 1) + + return aggregator, nil +} + +func (t *TimeoutAggregator[VoteT]) Start(ctx context.Context) error { + // manager for worker routines that process inbound events + for i := 0; i < defaultTimeoutAggregatorWorkers; i++ { + go t.queuedTimeoutsProcessingLoop(ctx) + } + + go t.enteringRankProcessingLoop(ctx) + + return nil +} + +// queuedTimeoutsProcessingLoop is the event loop which waits for notification +// about pending work and as soon as there is some it triggers processing. +func ( + t *TimeoutAggregator[VoteT], +) queuedTimeoutsProcessingLoop(ctx context.Context) { + defer t.wg.Done() + notifier := t.queuedTimeoutsNotifier + for { + select { + case <-ctx.Done(): + return + case <-notifier: + err := t.processQueuedTimeoutStates(ctx) + if err != nil { + return + } + } + } +} + +// processQueuedTimeoutStates sequentially processes items from `queuedTimeouts` +// until the queue returns 'empty'. Only when there are no more queued up +// TimeoutStates, this function call returns. No errors are expected during +// normal operations. +func (t *TimeoutAggregator[VoteT]) processQueuedTimeoutStates( + ctx context.Context, +) error { + for { + select { + case <-ctx.Done(): + return nil + case timeoutState, ok := <-t.queuedTimeouts: + if !ok { + // when there is no more messages in the queue, back to the loop to wait + // for the next incoming message to arrive. + return nil + } + + err := t.processQueuedTimeout(timeoutState) + + if err != nil { + return fmt.Errorf("could not process pending TO: %d: %w", + timeoutState.Rank, + err, + ) + } + + t.tracer.Trace("TimeoutState processed successfully") + } + } +} + +// processQueuedTimeout performs actual processing of queued timeouts, this +// method is called from multiple concurrent goroutines. No errors are expected +// during normal operation +func (t *TimeoutAggregator[VoteT]) processQueuedTimeout( + timeoutState *models.TimeoutState[VoteT], +) error { + // We create a timeout collector before validating the first TO, so processing + // an invalid TO will result in a collector being added, until the + // corresponding rank is pruned. + collector, _, err := t.collectors.GetOrCreateCollector(timeoutState.Rank) + if err != nil { + if errors.Is(err, models.ErrRankUnknown) { + t.tracer.Error("discarding TO for unknown rank", err) + return nil + } + return fmt.Errorf("could not get collector for rank %d: %w", + timeoutState.Rank, err) + } + + err = collector.AddTimeout(timeoutState) + if err != nil { + return fmt.Errorf("could not process TO for rank %d: %w", + timeoutState.Rank, err) + } + return nil +} + +// AddTimeout checks if TO is stale and appends TO to processing queue. +// The actual processing will be done asynchronously by the +// `TimeoutAggregator`'s internal worker routines. +func (t *TimeoutAggregator[VoteT]) AddTimeout( + timeoutState *models.TimeoutState[VoteT], +) { + // drop stale objects + if timeoutState.Rank < t.lowestRetainedRank.Load() { + t.tracer.Trace("drop stale timeouts") + return + } + + select { + case t.queuedTimeouts <- timeoutState: + t.queuedTimeoutsNotifier <- struct{}{} + default: + // processing pipeline `queuedTimeouts` is full + // It's ok to silently drop timeouts, because we are probably catching up. + t.tracer.Trace("no queue capacity, dropping timeout") + } +} + +// PruneUpToRank deletes all `TimeoutCollector`s _below_ to the given rank, as +// well as related indices. We only retain and process `TimeoutCollector`s, +// whose rank is equal or larger than `lowestRetainedRank`. If +// `lowestRetainedRank` is smaller than the previous value, the previous value +// is kept and the method call is a NoOp. +func (t *TimeoutAggregator[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + t.collectors.PruneUpToRank(lowestRetainedRank) +} + +// OnRankChange implements the `OnRankChange` callback from the +// `consensus.Consumer`. We notify the enteringRankProcessingLoop worker, which +// then prunes up to the active rank. CAUTION: the input to this callback is +// treated as trusted; precautions should be taken that messages from external +// nodes cannot be considered as inputs to this function +func (t *TimeoutAggregator[VoteT]) OnRankChange(oldRank, newRank uint64) { + if t.lowestRetainedRank.CompareAndSwap(oldRank, newRank) { + t.enteringRankNotifier <- struct{}{} + } +} + +// enteringRankProcessingLoop is a separate goroutine that performs processing +// of entering rank events +func (t *TimeoutAggregator[VoteT]) enteringRankProcessingLoop( + ctx context.Context, +) { + defer t.wg.Done() + notifier := t.enteringRankNotifier + for { + select { + case <-ctx.Done(): + return + case <-notifier: + t.PruneUpToRank(t.lowestRetainedRank.Load()) + } + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/timeoutaggregator/timeout_collectors.go b/consensus/timeoutaggregator/timeout_collectors.go new file mode 100644 index 0000000..f8395e6 --- /dev/null +++ b/consensus/timeoutaggregator/timeout_collectors.go @@ -0,0 +1,156 @@ +package timeoutaggregator + +import ( + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectors implements management of multiple timeout collectors +// indexed by rank. Implements consensus.TimeoutCollectors interface. Creating a +// TimeoutCollector for a particular rank is lazy (instances are created on +// demand). This structure is concurrently safe. +type TimeoutCollectors[VoteT models.Unique] struct { + tracer consensus.TraceLogger + lock sync.RWMutex + lowestRetainedRank uint64 // lowest rank, for which we still retain a TimeoutCollector and process timeouts + newestRankCachedCollector uint64 // highest rank, for which we have created a TimeoutCollector + collectors map[uint64]consensus.TimeoutCollector[VoteT] // rank -> TimeoutCollector + collectorFactory consensus.TimeoutCollectorFactory[VoteT] // factor for creating collectors +} + +var _ consensus.TimeoutCollectors[*nilUnique] = (*TimeoutCollectors[*nilUnique])(nil) + +func NewTimeoutCollectors[VoteT models.Unique]( + tracer consensus.TraceLogger, + lowestRetainedRank uint64, + collectorFactory consensus.TimeoutCollectorFactory[VoteT], +) *TimeoutCollectors[VoteT] { + return &TimeoutCollectors[VoteT]{ + tracer: tracer, + lowestRetainedRank: lowestRetainedRank, + newestRankCachedCollector: lowestRetainedRank, + collectors: make(map[uint64]consensus.TimeoutCollector[VoteT]), + collectorFactory: collectorFactory, + } +} + +// GetOrCreateCollector retrieves the consensus.TimeoutCollector for the +// specified rank or creates one if none exists. +// - (collector, true, nil) if no collector can be found by the rank, and a +// new collector was created. +// - (collector, false, nil) if the collector can be found by the rank +// - (nil, false, error) if running into any exception creating the timeout +// collector state machine +// +// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError if rank is below the pruning threshold +// - models.ErrRankUnknown if rank is not yet pruned but no epoch containing +// the given rank is known, this error +// +// can be returned from factory method. +func (t *TimeoutCollectors[VoteT]) GetOrCreateCollector(rank uint64) ( + consensus.TimeoutCollector[VoteT], + bool, + error, +) { + cachedCollector, hasCachedCollector, err := t.getCollector(rank) + if err != nil { + return nil, false, err + } + if hasCachedCollector { + return cachedCollector, false, nil + } + + collector, err := t.collectorFactory.Create(rank) + if err != nil { + return nil, false, fmt.Errorf( + "could not create timeout collector for rank %d: %w", + rank, + err, + ) + } + + // Initial check showed that there was no collector. However, it's possible + // that after the initial check but before acquiring the lock to add the + // newly-created collector, another goroutine already added the needed + // collector. Hence, check again after acquiring the lock: + t.lock.Lock() + clr, found := t.collectors[rank] + if found { + t.lock.Unlock() + return clr, false, nil + } + t.collectors[rank] = collector + if t.newestRankCachedCollector < rank { + t.newestRankCachedCollector = rank + } + t.lock.Unlock() + + t.tracer.Trace("timeout collector has been created") + return collector, true, nil +} + +// getCollector retrieves consensus.TimeoutCollector from local cache in +// concurrent safe way. Performs check for lowestRetainedRank. +// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError - in case rank is lower than +// lowestRetainedRank +func (t *TimeoutCollectors[VoteT]) getCollector(rank uint64) ( + consensus.TimeoutCollector[VoteT], + bool, + error, +) { + t.lock.RLock() + defer t.lock.RUnlock() + if rank < t.lowestRetainedRank { + return nil, false, models.NewBelowPrunedThresholdErrorf( + "cannot retrieve collector for pruned rank %d (lowest retained rank %d)", + rank, + t.lowestRetainedRank, + ) + } + + clr, found := t.collectors[rank] + return clr, found, nil +} + +// PruneUpToRank prunes the timeout collectors with ranks _below_ the given +// value, i.e. we only retain and process whose rank is equal or larger than +// `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the previous +// value, the previous value is kept and the method call is a NoOp. +func (t *TimeoutCollectors[VoteT]) PruneUpToRank(lowestRetainedRank uint64) { + t.lock.Lock() + if t.lowestRetainedRank >= lowestRetainedRank { + t.lock.Unlock() + return + } + sizeBefore := len(t.collectors) + if sizeBefore == 0 { + t.lowestRetainedRank = lowestRetainedRank + t.lock.Unlock() + return + } + + // to optimize the pruning of large rank-ranges, we compare: + // * the number of ranks for which we have collectors: len(t.collectors) + // * the number of ranks that need to be pruned: rank-t.lowestRetainedRank + // We iterate over the dimension which is smaller. + if uint64(sizeBefore) < lowestRetainedRank-t.lowestRetainedRank { + for w := range t.collectors { + if w < lowestRetainedRank { + delete(t.collectors, w) + } + } + } else { + for w := t.lowestRetainedRank; w < lowestRetainedRank; w++ { + delete(t.collectors, w) + } + } + t.lowestRetainedRank = lowestRetainedRank + t.lock.Unlock() + + t.tracer.Trace("pruned timeout collectors") +} diff --git a/consensus/timeoutcollector/aggregation.go b/consensus/timeoutcollector/aggregation.go new file mode 100644 index 0000000..9e3b7db --- /dev/null +++ b/consensus/timeoutcollector/aggregation.go @@ -0,0 +1,224 @@ +package timeoutcollector + +import ( + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" +) + +// signerInfo holds information about a signer, its public key and weight +type signerInfo struct { + pk []byte + weight uint64 +} + +// sigInfo holds signature and high QC rank submitted by some signer +type sigInfo struct { + sig []byte + newestQCRank uint64 +} + +// TimeoutSignatureAggregator implements consensus.TimeoutSignatureAggregator. +// It performs timeout specific BLS aggregation over multiple distinct messages. +// We perform timeout signature aggregation for some concrete rank, utilizing +// the protocol specification that timeouts sign the message: +// hash(rank, newestQCRank), where newestQCRank can have different values +// for different replicas. +// Rank and the identities of all authorized replicas are specified when the +// TimeoutSignatureAggregator is instantiated. Each signer is allowed to sign at +// most once. Aggregation uses BLS scheme. Mitigation against rogue attacks is +// done using Proof Of Possession (PoP). Implementation is only safe under the +// assumption that all proofs of possession (PoP) of the public keys are valid. +// This module does not perform the PoPs validity checks, it assumes +// verification was done outside the module. Implementation is thread-safe. +type TimeoutSignatureAggregator struct { + lock sync.RWMutex + dsTag []byte + aggregator consensus.SignatureAggregator + idToInfo map[models.Identity]signerInfo // auxiliary map to lookup signer weight and public key (only gets updated by constructor) + idToSignature map[models.Identity]sigInfo // signatures indexed by the signer ID + totalWeight uint64 // total accumulated weight + rank uint64 // rank for which we are aggregating signatures +} + +var _ consensus.TimeoutSignatureAggregator = (*TimeoutSignatureAggregator)(nil) + +// NewTimeoutSignatureAggregator returns a multi message signature aggregator +// initialized with a predefined rank for which we aggregate signatures, list of +// identities, their respective public keys and a domain separation tag. The +// identities represent the list of all authorized signers. The constructor does +// not verify PoPs of input public keys, it assumes verification was done +// outside this module. +// The constructor errors if: +// - the list of identities is empty +// - if one of the keys is not a valid public key. +// +// A multi message sig aggregator is used for aggregating timeouts for a single +// rank only. A new instance should be used for each signature aggregation task +// in the protocol. +func NewTimeoutSignatureAggregator( + aggregator consensus.SignatureAggregator, + rank uint64, // rank for which we are aggregating signatures + ids []models.WeightedIdentity, // list of all authorized signers + dsTag []byte, // domain separation tag used by the signature +) (*TimeoutSignatureAggregator, error) { + if len(ids) == 0 { + return nil, fmt.Errorf( + "number of participants must be larger than 0, got %d", + len(ids), + ) + } + + // build the internal map for a faster look-up + idToInfo := make(map[models.Identity]signerInfo) + for _, id := range ids { + idToInfo[id.Identity()] = signerInfo{ + pk: id.PublicKey(), + weight: id.Weight(), + } + } + + return &TimeoutSignatureAggregator{ + aggregator: aggregator, + dsTag: dsTag, + idToInfo: idToInfo, + idToSignature: make(map[models.Identity]sigInfo), + rank: rank, + }, nil +} + +// VerifyAndAdd verifies the signature under the stored public keys and adds +// signature with corresponding newest QC rank to the internal set. Internal set +// and collected weight is modified iff the signer ID is not a duplicate and +// signature _is_ valid. The total weight of all collected signatures (excluding +// duplicates) is returned regardless of any returned error. +// Expected errors during normal operations: +// - models.InvalidSignerError if signerID is invalid (not a consensus +// participant) +// - models.DuplicatedSignerError if the signer has been already added +// - models.ErrInvalidSignature if signerID is valid but signature is +// cryptographically invalid +// +// The function is thread-safe. +func (a *TimeoutSignatureAggregator) VerifyAndAdd( + signerID models.Identity, + sig []byte, + newestQCRank uint64, +) (totalWeight uint64, exception error) { + info, ok := a.idToInfo[signerID] + if !ok { + return a.TotalWeight(), models.NewInvalidSignerErrorf( + "%v is not an authorized signer", + signerID, + ) + } + + // to avoid expensive signature verification we will proceed with double lock + // style check + if a.hasSignature(signerID) { + return a.TotalWeight(), models.NewDuplicatedSignerErrorf( + "signature from %v was already added", + signerID, + ) + } + + msg := verification.MakeTimeoutMessage(a.rank, newestQCRank) + valid := a.aggregator.VerifySignatureRaw(info.pk, sig, msg, a.dsTag) + if !valid { + return a.TotalWeight(), fmt.Errorf( + "invalid signature from %s: %w", + signerID, + models.ErrInvalidSignature, + ) + } + + a.lock.Lock() + defer a.lock.Unlock() + + if _, duplicate := a.idToSignature[signerID]; duplicate { + return a.totalWeight, models.NewDuplicatedSignerErrorf( + "signature from %v was already added", + signerID, + ) + } + + a.idToSignature[signerID] = sigInfo{ + sig: sig, + newestQCRank: newestQCRank, + } + a.totalWeight += info.weight + + return a.totalWeight, nil +} + +func (a *TimeoutSignatureAggregator) hasSignature( + signerID models.Identity, +) bool { + a.lock.RLock() + defer a.lock.RUnlock() + _, found := a.idToSignature[signerID] + return found +} + +// TotalWeight returns the total weight presented by the collected signatures. +// The function is thread-safe +func (a *TimeoutSignatureAggregator) TotalWeight() uint64 { + a.lock.RLock() + defer a.lock.RUnlock() + return a.totalWeight +} + +// Rank returns rank for which aggregation happens +// The function is thread-safe +func (a *TimeoutSignatureAggregator) Rank() uint64 { + return a.rank +} + +// Aggregate aggregates the signatures and returns the aggregated signature. +// The resulting aggregated signature is guaranteed to be valid, as all +// individual signatures are pre-validated before their addition. Expected +// errors during normal operations: +// - models.InsufficientSignaturesError if no signatures have been added yet +// +// This function is thread-safe +func (a *TimeoutSignatureAggregator) Aggregate() ( + []consensus.TimeoutSignerInfo, + models.AggregatedSignature, + error, +) { + a.lock.RLock() + defer a.lock.RUnlock() + + sharesNum := len(a.idToSignature) + signatures := make([][]byte, 0, sharesNum) + publicKeys := make([][]byte, 0, sharesNum) + signersData := make([]consensus.TimeoutSignerInfo, 0, sharesNum) + for id, info := range a.idToSignature { + publicKeys = append(publicKeys, a.idToInfo[id].pk) + signatures = append(signatures, info.sig) + signersData = append(signersData, consensus.TimeoutSignerInfo{ + NewestQCRank: info.newestQCRank, + Signer: id, + }) + } + + if sharesNum == 0 { + return nil, nil, models.NewInsufficientSignaturesErrorf( + "cannot aggregate an empty list of signatures", + ) + } + + aggSignature, err := a.aggregator.Aggregate(publicKeys, signatures) + if err != nil { + // any other error here is a symptom of an internal bug + return nil, nil, fmt.Errorf( + "unexpected internal error during BLS signature aggregation: %w", + err, + ) + } + + return signersData, aggSignature, nil +} diff --git a/consensus/timeoutcollector/factory.go b/consensus/timeoutcollector/factory.go new file mode 100644 index 0000000..085695b --- /dev/null +++ b/consensus/timeoutcollector/factory.go @@ -0,0 +1,166 @@ +package timeoutcollector + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollectorFactory implements consensus.TimeoutCollectorFactory, it is +// responsible for creating timeout collector for given rank. +type TimeoutCollectorFactory[VoteT models.Unique] struct { + tracer consensus.TraceLogger + notifier consensus.TimeoutAggregationConsumer[VoteT] + processorFactory consensus.TimeoutProcessorFactory[VoteT] +} + +var _ consensus.TimeoutCollectorFactory[*nilUnique] = (*TimeoutCollectorFactory[*nilUnique])(nil) + +// NewTimeoutCollectorFactory creates new instance of TimeoutCollectorFactory. +// No error returns are expected during normal operations. +func NewTimeoutCollectorFactory[VoteT models.Unique]( + tracer consensus.TraceLogger, + notifier consensus.TimeoutAggregationConsumer[VoteT], + createProcessor consensus.TimeoutProcessorFactory[VoteT], +) *TimeoutCollectorFactory[VoteT] { + return &TimeoutCollectorFactory[VoteT]{ + tracer: tracer, + notifier: notifier, + processorFactory: createProcessor, + } +} + +// Create is a factory method to generate a TimeoutCollector for a given rank +// Expected error returns during normal operations: +// - models.ErrRankUnknown if rank is not yet pruned but no epoch containing +// the given rank is known +// +// All other errors should be treated as exceptions. +func (f *TimeoutCollectorFactory[VoteT]) Create(rank uint64) ( + consensus.TimeoutCollector[VoteT], + error, +) { + processor, err := f.processorFactory.Create(rank) + if err != nil { + return nil, fmt.Errorf( + "could not create TimeoutProcessor at rank %d: %w", + rank, + err, + ) + } + return NewTimeoutCollector(f.tracer, rank, f.notifier, processor), nil +} + +// TimeoutProcessorFactory implements consensus.TimeoutProcessorFactory, it is +// responsible for creating timeout processor for given rank. +type TimeoutProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + tracer consensus.TraceLogger + aggregator consensus.SignatureAggregator + committee consensus.Replicas + notifier consensus.TimeoutCollectorConsumer[VoteT] + validator consensus.Validator[StateT, VoteT] + domainSeparationTag []byte +} + +var _ consensus.TimeoutProcessorFactory[*nilUnique] = (*TimeoutProcessorFactory[*nilUnique, *nilUnique, *nilUnique])(nil) + +// NewTimeoutProcessorFactory creates new instance of TimeoutProcessorFactory. +// No error returns are expected during normal operations. +func NewTimeoutProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + aggregator consensus.SignatureAggregator, + notifier consensus.TimeoutCollectorConsumer[VoteT], + committee consensus.Replicas, + validator consensus.Validator[StateT, VoteT], + domainSeparationTag []byte, +) *TimeoutProcessorFactory[StateT, VoteT, PeerIDT] { + return &TimeoutProcessorFactory[StateT, VoteT, PeerIDT]{ + tracer: tracer, + aggregator: aggregator, + committee: committee, + notifier: notifier, + validator: validator, + domainSeparationTag: domainSeparationTag, + } +} + +// Create is a factory method to generate a TimeoutProcessor for a given rank +// Expected error returns during normal operations: +// - models.ErrRankUnknown no epoch containing the given rank is known +// +// All other errors should be treated as exceptions. +func (f *TimeoutProcessorFactory[StateT, VoteT, PeerIDT]) Create(rank uint64) ( + consensus.TimeoutProcessor[VoteT], + error, +) { + allParticipants, err := f.committee.IdentitiesByRank(rank) + if err != nil { + return nil, fmt.Errorf("error retrieving consensus participants: %w", err) + } + + sigAggregator, err := NewTimeoutSignatureAggregator( + f.aggregator, + rank, + allParticipants, + f.domainSeparationTag, + ) + if err != nil { + return nil, fmt.Errorf( + "could not create TimeoutSignatureAggregator at rank %d: %w", + rank, + err, + ) + } + + return NewTimeoutProcessor[StateT, VoteT, PeerIDT]( + f.tracer, + f.committee, + f.validator, + sigAggregator, + f.notifier, + ) +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/timeoutcollector/timeout_cache.go b/consensus/timeoutcollector/timeout_cache.go new file mode 100644 index 0000000..9e1fa9e --- /dev/null +++ b/consensus/timeoutcollector/timeout_cache.go @@ -0,0 +1,122 @@ +package timeoutcollector + +import ( + "errors" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var ( + // ErrRepeatedTimeout is emitted, when we receive an identical timeout state + // for the same state from the same voter multiple times. This error does + // _not_ indicate equivocation. + ErrRepeatedTimeout = errors.New("duplicated timeout") + ErrTimeoutForIncompatibleRank = errors.New("timeout for incompatible rank") +) + +// TimeoutStatesCache maintains a _concurrency safe_ cache of timeouts for one +// particular rank. The cache memorizes the order in which the timeouts were +// received. Timeouts are de-duplicated based on the following rules: +// - For each voter (i.e. SignerID), we store the _first_ timeout t0. +// - For any subsequent timeout t, we check whether t equals t0. +// If this is the case, we consider the timeout a duplicate and drop it. +// If t and t0 have different contents, the voter is equivocating, and +// we return a models.DoubleTimeoutError. +type TimeoutStatesCache[VoteT models.Unique] struct { + lock sync.RWMutex + rank uint64 + timeouts map[models.Identity]*models.TimeoutState[VoteT] // signerID -> first timeout +} + +// NewTimeoutStatesCache instantiates a TimeoutStatesCache for the given rank +func NewTimeoutStatesCache[VoteT models.Unique]( + rank uint64, +) *TimeoutStatesCache[VoteT] { + return &TimeoutStatesCache[VoteT]{ + rank: rank, + timeouts: make(map[models.Identity]*models.TimeoutState[VoteT]), + } +} + +func (vc *TimeoutStatesCache[VoteT]) Rank() uint64 { return vc.rank } + +// AddTimeoutState stores a timeout in the cache. The following errors are +// expected during normal operations: +// - nil: if the timeout was successfully added +// - models.DoubleTimeoutError is returned if the replica is equivocating +// - RepeatedTimeoutErr is returned when adding an _identical_ timeout for the +// same rank from the same voter multiple times. +// - TimeoutForIncompatibleRankError is returned if the timeout is for a +// different rank. +// +// When AddTimeoutState returns an error, the timeout is _not_ stored. +func (vc *TimeoutStatesCache[VoteT]) AddTimeoutState( + timeout *models.TimeoutState[VoteT], +) error { + if timeout.Rank != vc.rank { + return ErrTimeoutForIncompatibleRank + } + vc.lock.Lock() + + // De-duplicated timeouts based on the following rules: + // * For each voter (i.e. SignerID), we store the _first_ t0. + // * For any subsequent timeout t, we check whether t equals t0. + // If this is the case, we consider the timeout a duplicate and drop it. + // If t and t0 have different contents, the voter is equivocating, and + // we return a models.DoubleTimeoutError. + firstTimeout, exists := vc.timeouts[(*timeout.Vote).Identity()] + if exists { + vc.lock.Unlock() + if !firstTimeout.Equals(timeout) { + return models.NewDoubleTimeoutErrorf( + firstTimeout, + timeout, + "detected timeout equivocation by replica %x at rank: %d", + (*timeout.Vote).Identity(), + vc.rank, + ) + } + return ErrRepeatedTimeout + } + vc.timeouts[(*timeout.Vote).Identity()] = timeout + vc.lock.Unlock() + + return nil +} + +// GetTimeoutState returns the stored timeout for the given `signerID`. Returns: +// - (timeout, true) if a timeout state from signerID is known +// - (nil, false) no timeout state from signerID is known +func (vc *TimeoutStatesCache[VoteT]) GetTimeoutState( + signerID models.Identity, +) (*models.TimeoutState[VoteT], bool) { + vc.lock.RLock() + timeout, exists := vc.timeouts[signerID] // if signerID is unknown, its `Vote` pointer is nil + vc.lock.RUnlock() + return timeout, exists +} + +// Size returns the number of cached timeout states +func (vc *TimeoutStatesCache[VoteT]) Size() int { + vc.lock.RLock() + s := len(vc.timeouts) + vc.lock.RUnlock() + return s +} + +// All returns all currently cached timeout states. Concurrency safe. +func (vc *TimeoutStatesCache[VoteT]) All() []*models.TimeoutState[VoteT] { + vc.lock.RLock() + defer vc.lock.RUnlock() + return vc.all() +} + +// all returns all currently cached timeout states. NOT concurrency safe +func (vc *TimeoutStatesCache[VoteT]) all() []*models.TimeoutState[VoteT] { + timeoutStates := make([]*models.TimeoutState[VoteT], 0, len(vc.timeouts)) + for _, t := range vc.timeouts { + timeoutStates = append(timeoutStates, t) + } + return timeoutStates +} diff --git a/consensus/timeoutcollector/timeout_collector.go b/consensus/timeoutcollector/timeout_collector.go new file mode 100644 index 0000000..3d3b367 --- /dev/null +++ b/consensus/timeoutcollector/timeout_collector.go @@ -0,0 +1,155 @@ +package timeoutcollector + +import ( + "errors" + "fmt" + "sync/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// TimeoutCollector implements logic for collecting timeout states. Performs +// deduplication, caching and processing of timeouts, delegating those tasks to +// underlying modules. Emits notifications about verified QCs and TCs, if their +// rank is newer than any QC or TC previously known to the TimeoutCollector. +// This module is safe to use in concurrent environment. +type TimeoutCollector[VoteT models.Unique] struct { + tracer consensus.TraceLogger + timeoutsCache *TimeoutStatesCache[VoteT] // cache for tracking double timeout and timeout equivocation + notifier consensus.TimeoutAggregationConsumer[VoteT] + processor consensus.TimeoutProcessor[VoteT] + newestReportedQC atomic.Uint64 // rank of newest QC that was reported + newestReportedTC atomic.Uint64 // rank of newest TC that was reported +} + +var _ consensus.TimeoutCollector[*nilUnique] = (*TimeoutCollector[*nilUnique])(nil) + +// NewTimeoutCollector creates new instance of TimeoutCollector +func NewTimeoutCollector[VoteT models.Unique]( + tracer consensus.TraceLogger, + rank uint64, + notifier consensus.TimeoutAggregationConsumer[VoteT], + processor consensus.TimeoutProcessor[VoteT], +) *TimeoutCollector[VoteT] { + tc := &TimeoutCollector[VoteT]{ + tracer: tracer, + notifier: notifier, + timeoutsCache: NewTimeoutStatesCache[VoteT](rank), + processor: processor, + newestReportedQC: atomic.Uint64{}, + newestReportedTC: atomic.Uint64{}, + } + tc.newestReportedQC.Store(0) + tc.newestReportedTC.Store(0) + return tc +} + +// AddTimeout adds a Timeout State to the collector. When TSs from +// strictly more than 1/3 of consensus participants (measured by weight) were +// collected, the callback for partial TC will be triggered. After collecting +// TSs from a supermajority, a TC will be created and passed to the EventLoop. +// Expected error returns during normal operations: +// - timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for +// incompatible rank +// +// All other exceptions are symptoms of potential state corruption. +func (c *TimeoutCollector[VoteT]) AddTimeout( + timeout *models.TimeoutState[VoteT], +) error { + // cache timeout + err := c.timeoutsCache.AddTimeoutState(timeout) + if err != nil { + if errors.Is(err, ErrRepeatedTimeout) { + return nil + } + doubleTimeoutErr, isDoubleTimeoutErr := + models.AsDoubleTimeoutError[VoteT](err) + if isDoubleTimeoutErr { + c.notifier.OnDoubleTimeoutDetected( + doubleTimeoutErr.FirstTimeout, + doubleTimeoutErr.ConflictingTimeout, + ) + return nil + } + return fmt.Errorf("internal error adding timeout to cache: %d: %w", + timeout.Rank, + err, + ) + } + + err = c.processTimeout(timeout) + if err != nil { + return fmt.Errorf("internal error processing TO: %d: %w", + timeout.Rank, + err, + ) + } + return nil +} + +// processTimeout delegates TO processing to TimeoutProcessor, handles sentinel +// errors expected errors are handled and reported to notifier. Notifies +// listeners about validates QCs and TCs. No errors are expected during normal +// flow of operations. +func (c *TimeoutCollector[VoteT]) processTimeout( + timeout *models.TimeoutState[VoteT], +) error { + err := c.processor.Process(timeout) + if err != nil { + if invalidTimeoutErr, ok := models.AsInvalidTimeoutError[VoteT](err); ok { + c.notifier.OnInvalidTimeoutDetected(*invalidTimeoutErr) + return nil + } + return fmt.Errorf("internal error while processing timeout: %w", err) + } + + // TODO: consider moving OnTimeoutProcessed to TimeoutAggregationConsumer, + // need to fix telemetry for this. + c.notifier.OnTimeoutProcessed(timeout) + + // In the following, we emit notifications about new QCs, if their rank is + // newer than any QC previously known to the TimeoutCollector. Note that our + // implementation only provides weak ordering: + // * Over larger time scales, the emitted events are for statistically + // increasing ranks. + // * However, on short time scales there are _no_ monotonicity guarantees + // w.r.t. the ranks. + // Explanation: + // While only QCs with strict monotonicly increasing ranks pass the + // `if c.newestReportedQC.Set(timeout.NewestQC.Rank)` statement, we emit the + // notification in a separate step. Therefore, emitting the notifications is + // subject to races, where on very short time-scales the notifications can be + // out of order. Nevertheless, we note that notifications are only created for + // QCs that are strictly newer than any other known QC at the time we check + // via the `if ... Set(..)` statement. Thereby, we implement the desired + // filtering behaviour, i.e. that the recipient of the notifications is not + // spammed by old (or repeated) QCs. Reasoning for this approach: + // The current implementation is completely lock-free without noteworthy risk + // of congestion. For the recipient of the notifications, the weak ordering is + // of no concern, because it anyway is only interested in the newest QC. + // Time-localized disorder is irrelevant, because newer QCs that would arrive + // later in a strongly ordered system can only arrive earlier in our weakly + // ordered implementation. Hence, if anything, the recipient receives the + // desired information _earlier_ but not later. + if c.newestReportedQC.Load() < timeout.LatestQuorumCertificate.GetRank() { + c.newestReportedQC.Store(timeout.LatestQuorumCertificate.GetRank()) + c.notifier.OnNewQuorumCertificateDiscovered(timeout.LatestQuorumCertificate) + } + // Same explanation for weak ordering of QCs also applies to TCs. + if timeout.PriorRankTimeoutCertificate != nil { + if c.newestReportedTC.Load() < timeout.PriorRankTimeoutCertificate.GetRank() { + c.newestReportedTC.Store(timeout.PriorRankTimeoutCertificate.GetRank()) + c.notifier.OnNewTimeoutCertificateDiscovered( + timeout.PriorRankTimeoutCertificate, + ) + } + } + + return nil +} + +// Rank returns rank which is associated with this timeout collector +func (c *TimeoutCollector[VoteT]) Rank() uint64 { + return c.timeoutsCache.Rank() +} diff --git a/consensus/timeoutcollector/timeout_processor.go b/consensus/timeoutcollector/timeout_processor.go new file mode 100644 index 0000000..cb4da65 --- /dev/null +++ b/consensus/timeoutcollector/timeout_processor.go @@ -0,0 +1,410 @@ +package timeoutcollector + +import ( + "context" + "errors" + "fmt" + + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/tracker" +) + +// accumulatedWeightTracker tracks one-time event of reaching required weight +// Uses atomic flag to guarantee concurrency safety. +type accumulatedWeightTracker struct { + minRequiredWeight uint64 + done atomic.Bool +} + +func (t *accumulatedWeightTracker) Done() bool { + return t.done.Load() +} + +// Track returns true if `weight` reaches or exceeds `minRequiredWeight` for the +// _first time_. All subsequent calls of `Track` (with any value) return false. +func (t *accumulatedWeightTracker) Track(weight uint64) bool { + if weight < t.minRequiredWeight { + return false + } + return t.done.CompareAndSwap(false, true) +} + +// TimeoutProcessor implements the consensus.TimeoutProcessor interface. It +// processes timeout states broadcast by other replicas of the consensus +// committee. TimeoutProcessor collects TSs for one rank, eventually when enough +// timeout states are contributed TimeoutProcessor will create a timeout +// certificate which can be used to advance round. Concurrency safe. +type TimeoutProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + tracer consensus.TraceLogger + rank uint64 + validator consensus.Validator[StateT, VoteT] + committee consensus.Replicas + sigAggregator consensus.TimeoutSignatureAggregator + notifier consensus.TimeoutCollectorConsumer[VoteT] + voting consensus.VotingProvider[StateT, VoteT, PeerIDT] + partialTCTracker accumulatedWeightTracker + tcTracker accumulatedWeightTracker + newestQCTracker *tracker.NewestQCTracker +} + +var _ consensus.TimeoutProcessor[*nilUnique] = (*TimeoutProcessor[*nilUnique, *nilUnique, *nilUnique])(nil) + +// NewTimeoutProcessor creates new instance of TimeoutProcessor +// Returns the following expected errors for invalid inputs: +// - models.ErrRankUnknown if no epoch containing the given rank is known +// +// All other errors should be treated as exceptions. +func NewTimeoutProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + committee consensus.Replicas, + validator consensus.Validator[StateT, VoteT], + sigAggregator consensus.TimeoutSignatureAggregator, + notifier consensus.TimeoutCollectorConsumer[VoteT], +) (*TimeoutProcessor[StateT, VoteT, PeerIDT], error) { + rank := sigAggregator.Rank() + qcThreshold, err := committee.QuorumThresholdForRank(rank) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve QC weight threshold for rank %d: %w", + rank, + err, + ) + } + timeoutThreshold, err := committee.TimeoutThresholdForRank(rank) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve timeout weight threshold for rank %d: %w", + rank, + err, + ) + } + return &TimeoutProcessor[StateT, VoteT, PeerIDT]{ + tracer: tracer, + rank: rank, + committee: committee, + validator: validator, + notifier: notifier, + partialTCTracker: accumulatedWeightTracker{ + minRequiredWeight: timeoutThreshold, + done: *atomic.NewBool(false), + }, + tcTracker: accumulatedWeightTracker{ + minRequiredWeight: qcThreshold, + done: *atomic.NewBool(false), + }, + sigAggregator: sigAggregator, + newestQCTracker: tracker.NewNewestQCTracker(), + }, nil +} + +// Process performs processing of timeout state in concurrent safe way. This +// function is implemented to be called by multiple goroutines at the same time. +// Design of this function is event driven, as soon as we collect enough weight +// to create a TC or a partial TC we will immediately do so and submit it +// via callback for further processing. +// Expected error returns during normal operations: +// - ErrTimeoutForIncompatibleRank - submitted timeout for incompatible rank +// - models.InvalidTimeoutError - submitted invalid timeout(invalid structure +// or invalid signature) +// - models.DuplicatedSignerError if a timeout from the same signer was +// previously already added. It does _not necessarily_ imply that the +// timeout is invalid or the sender is equivocating. +// +// All other errors should be treated as exceptions. +func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) Process( + timeout *models.TimeoutState[VoteT], +) error { + if p.rank != timeout.Rank { + return fmt.Errorf( + "received incompatible timeout, expected %d got %d: %w", + p.rank, + timeout.Rank, + ErrTimeoutForIncompatibleRank, + ) + } + + if p.tcTracker.Done() { + return nil + } + + err := p.validateTimeout(timeout) + if err != nil { + return fmt.Errorf("validating timeout failed: %w", err) + } + if p.tcTracker.Done() { + return nil + } + + // CAUTION: for correctness it is critical that we update the + // `newestQCTracker` first, _before_ we add the TO's signature to + // `sigAggregator`. Reasoning: + // * For a valid TC, we require that the TC includes a QC with + // rank ≥ max{TC.LatestQuorumCertificateRanks}. + // * The `LatestQuorumCertificateRanks` is maintained by `sigAggregator`. + // * Hence, for any rank `v ∈ LatestQuorumCertificateRanks` that + // `sigAggregator` knows, a QC with equal or larger rank is known to + // `newestQCTracker`. This is guaranteed if and only if `newestQCTracker` + // is updated first. + p.newestQCTracker.Track(&timeout.LatestQuorumCertificate) + + totalWeight, err := p.sigAggregator.VerifyAndAdd( + (*timeout.Vote).Source(), + (*timeout.Vote).GetSignature(), + timeout.LatestQuorumCertificate.GetRank(), + ) + if err != nil { + if models.IsInvalidSignerError(err) { + return models.NewInvalidTimeoutErrorf( + timeout, + "invalid signer for timeout: %w", + err, + ) + } + if errors.Is(err, models.ErrInvalidSignature) { + return models.NewInvalidTimeoutErrorf( + timeout, + "timeout is from valid signer but has cryptographically invalid signature: %w", + err, + ) + } + // models.DuplicatedSignerError is an expected error and just bubbled up the + // call stack. It does _not necessarily_ imply that the timeout is invalid + // or the sender is equivocating. + return fmt.Errorf("adding signature to aggregator failed: %w", err) + } + p.tracer.Trace(fmt.Sprintf( + "processed timeout, total weight=(%d), required=(%d)", + totalWeight, + p.tcTracker.minRequiredWeight, + )) + + if p.partialTCTracker.Track(totalWeight) { + qc := p.newestQCTracker.NewestQC() + p.notifier.OnPartialTimeoutCertificateCreated( + p.rank, + *qc, + timeout.PriorRankTimeoutCertificate, + ) + } + + // Checking of conditions for building TC are satisfied when willBuildTC is + // true. At this point, we have enough signatures to build a TC. Another + // routine might just be at this point. To avoid duplicate work, Track returns + // true only once. + willBuildTC := p.tcTracker.Track(totalWeight) + if !willBuildTC { + // either we do not have enough timeouts to build a TC, or another thread + // has already passed this gate and created a TC + return nil + } + + tc, err := p.buildTC() + if err != nil { + return fmt.Errorf("internal error constructing TC: %w", err) + } + p.notifier.OnTimeoutCertificateConstructedFromTimeouts(*tc) + + return nil +} + +// validateTimeout performs validation of timeout state, verifies if timeout is +// correctly structured and included QC and TC is correctly structured and +// signed. ATTENTION: this function does _not_ check whether the TO's `SignerID` +// is an authorized node nor if the signature is valid. These checks happen in +// signature aggregator. Expected error returns during normal operations: +// * models.InvalidTimeoutError - submitted invalid timeout +// All other errors should be treated as exceptions. +func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) validateTimeout( + timeout *models.TimeoutState[VoteT], +) error { + // 1. check if it's correctly structured + // (a) Every TO must contain a QC + if timeout.LatestQuorumCertificate == nil { + return models.NewInvalidTimeoutErrorf(timeout, "TimeoutState without QC is invalid") + } + + if timeout.Rank <= timeout.LatestQuorumCertificate.GetRank() { + return models.NewInvalidTimeoutErrorf( + timeout, + "TO's QC %d cannot be newer than the TO's rank %d", + timeout.LatestQuorumCertificate.GetRank(), + timeout.Rank, + ) + } + + // (b) If a TC is included, the TC must be for the past round, no matter + // whether a QC for the last round is also included. In some edge cases, a + // node might observe _both_ QC and TC for the previous round, in which + // case it can include both. + if timeout.PriorRankTimeoutCertificate != nil { + if timeout.Rank != timeout.PriorRankTimeoutCertificate.GetRank()+1 { + return models.NewInvalidTimeoutErrorf( + timeout, + "invalid TC for non-previous rank, expected rank %d, got rank %d", + timeout.Rank-1, + timeout.PriorRankTimeoutCertificate.GetRank(), + ) + } + if timeout.LatestQuorumCertificate.GetRank() < + timeout.PriorRankTimeoutCertificate.GetLatestQuorumCert().GetRank() { + return models.NewInvalidTimeoutErrorf( + timeout, + "timeout.LatestQuorumCertificate is older (rank=%d) than the QC in timeout.PriorRankTimeoutCertificate (rank=%d)", + timeout.LatestQuorumCertificate.GetRank(), + timeout.PriorRankTimeoutCertificate.GetLatestQuorumCert().GetRank(), + ) + } + } + // (c) The TO must contain a proof that sender legitimately entered + // timeout.Rank. Transitioning to round timeout.Rank is possible either by + // observing a QC or a TC for the previous round. If no QC is included, we + // require a TC to be present, which by check (1b) must be for the + // previous round. + lastRankSuccessful := timeout.Rank == + timeout.LatestQuorumCertificate.GetRank()+1 + if !lastRankSuccessful { + // The TO's sender did _not_ observe a QC for round timeout.Rank-1. Hence, + // it should include a TC for the previous round. Otherwise, the TO is + // invalid. + if timeout.PriorRankTimeoutCertificate == nil { + return models.NewInvalidTimeoutErrorf(timeout, "timeout must include TC") + } + } + + // 2. Check if QC is valid + err := p.validator.ValidateQuorumCertificate(timeout.LatestQuorumCertificate) + if err != nil { + if models.IsInvalidQuorumCertificateError(err) { + return models.NewInvalidTimeoutErrorf( + timeout, + "included QC is invalid: %w", + err, + ) + } + if errors.Is(err, models.ErrRankUnknown) { + // We require each replica to be bootstrapped with a QC pointing to a + // finalized state. Therefore, we should know the Epoch for any QC.Rank + // and TC.Rank we encounter. Receiving a `models.ErrRankUnknown` is + // conceptually impossible, i.e. a symptom of an internal bug or invalid + // bootstrapping information. + return fmt.Errorf( + "no Epoch information availalbe for QC that was included in TO; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return fmt.Errorf("unexpected error when validating QC: %w", err) + } + + // 3. If TC is included, it must be valid + if timeout.PriorRankTimeoutCertificate != nil { + err = p.validator.ValidateTimeoutCertificate( + &timeout.PriorRankTimeoutCertificate, + ) + if err != nil { + if models.IsInvalidTimeoutCertificateError(err) { + return models.NewInvalidTimeoutErrorf( + timeout, + "included TC is invalid: %w", + err, + ) + } + if errors.Is(err, models.ErrRankUnknown) { + // We require each replica to be bootstrapped with a QC pointing to a + // finalized state. Therefore, we should know the Epoch for any QC.Rank + // and TC.Rank we encounter. Receiving a `models.ErrRankUnknown` is + // conceptually impossible, i.e. a symptom of an internal bug or invalid + // bootstrapping information. + return fmt.Errorf( + "no Epoch information availalbe for TC that was included in TO; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return fmt.Errorf("unexpected error when validating TC: %w", err) + } + } + return nil + +} + +// buildTC performs aggregation of signatures when we have collected enough +// weight for building TC. This function is run only once by single worker. +// Any error should be treated as exception. +func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) buildTC() ( + *models.TimeoutCertificate, + error, +) { + signersData, aggregatedSig, err := p.sigAggregator.Aggregate() + if err != nil { + return nil, fmt.Errorf( + "could not aggregate multi message signature: %w", + err, + ) + } + + newestQCRanks := make([]uint64, 0, len(signersData)) + for _, data := range signersData { + newestQCRanks = append(newestQCRanks, data.NewestQCRank) + } + + // Note that `newestQC` can have a larger rank than any of the ranks included + // in `newestQCRanks`. This is because for a TO currently being processes + // following two operations are executed in separate steps: + // * updating the `newestQCTracker` with the QC from the TO + // * adding the TO's signature to `sigAggregator` + // Therefore, races are possible, where the `newestQCTracker` already knows of + // a QC with larger rank than the data stored in `sigAggregator`. + newestQC := p.newestQCTracker.NewestQC() + + tc, err := p.voting.FinalizeTimeout( + context.TODO(), + (*newestQC).GetFilter(), + p.rank, + newestQCRanks, + aggregatedSig, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout certificate: %w", err) + } + + return &tc, nil +} + +// signerIndicesFromIdentities encodes identities into signer indices. +// Any error should be treated as exception. +func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) signerIndicesFromIdentities( + signerIDs []models.WeightedIdentity, +) ([]byte, error) { + allIdentities, err := p.committee.IdentitiesByRank(p.rank) + if err != nil { + return nil, fmt.Errorf( + "could not retrieve identities for rank %d: %w", + p.rank, + err, + ) + } + + signerSet := map[models.Identity]struct{}{} + for _, signerID := range signerIDs { + signerSet[signerID.Identity()] = struct{}{} + } + + signerIndices := make([]byte, len(allIdentities)+7/8) + for i, member := range allIdentities { + if _, ok := signerSet[member.Identity()]; ok { + signerIndices[i/8] |= 1 << i % 8 + } + } + + return signerIndices, nil +} diff --git a/consensus/tracker/tracker.go b/consensus/tracker/tracker.go new file mode 100644 index 0000000..1330b7d --- /dev/null +++ b/consensus/tracker/tracker.go @@ -0,0 +1,175 @@ +package tracker + +import ( + "unsafe" + + "go.uber.org/atomic" + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// NewestQCTracker is a helper structure which keeps track of the newest QC +// (by rank) in concurrency safe way. +type NewestQCTracker struct { + newestQC *atomic.UnsafePointer +} + +func NewNewestQCTracker() *NewestQCTracker { + tracker := &NewestQCTracker{ + newestQC: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of NewestQC if the provided instance is newer +// (by rank). Concurrency safe +func (t *NewestQCTracker) Track(qc *models.QuorumCertificate) bool { + // to record the newest value that we have ever seen we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestQC := t.NewestQC() + // verify that our update makes sense + if newestQC != nil && (*newestQC).GetRank() >= (*qc).GetRank() { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestQC.CompareAndSwap(unsafe.Pointer(newestQC), unsafe.Pointer(qc)) { + return true + } + } +} + +// NewestQC returns the newest QC(by rank) tracked. +// Concurrency safe. +func (t *NewestQCTracker) NewestQC() *models.QuorumCertificate { + return (*models.QuorumCertificate)(t.newestQC.Load()) +} + +// NewestTCTracker is a helper structure which keeps track of the newest TC (by +// rank) in concurrency safe way. +type NewestTCTracker struct { + newestTC *atomic.UnsafePointer +} + +func NewNewestTCTracker() *NewestTCTracker { + tracker := &NewestTCTracker{ + newestTC: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of NewestTC if the provided instance is newer (by +// rank). Concurrency safe. +func (t *NewestTCTracker) Track(tc *models.TimeoutCertificate) bool { + // to record the newest value that we have ever seen we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestTC := t.NewestTC() + // verify that our update makes sense + if newestTC != nil && (*newestTC).GetRank() >= (*tc).GetRank() { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestTC.CompareAndSwap(unsafe.Pointer(newestTC), unsafe.Pointer(tc)) { + return true + } + } +} + +// NewestTC returns the newest TC(by rank) tracked. +// Concurrency safe. +func (t *NewestTCTracker) NewestTC() *models.TimeoutCertificate { + return (*models.TimeoutCertificate)(t.newestTC.Load()) +} + +// NewestStateTracker is a helper structure which keeps track of the newest +// state (by rank) in concurrency safe way. +type NewestStateTracker[StateT models.Unique] struct { + newestState *atomic.UnsafePointer +} + +func NewNewestStateTracker[StateT models.Unique]() *NewestStateTracker[StateT] { + tracker := &NewestStateTracker[StateT]{ + newestState: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of newestState if the provided instance is newer +// (by rank). Concurrency safe. +func (t *NewestStateTracker[StateT]) Track(state *models.State[StateT]) bool { + // to record the newest value that we have ever seen we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestState := t.NewestState() + // verify that our update makes sense + if newestState != nil && newestState.Rank >= state.Rank { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestState.CompareAndSwap( + unsafe.Pointer(newestState), + unsafe.Pointer(state), + ) { + return true + } + } +} + +// NewestState returns the newest state (by rank) tracked. +// Concurrency safe. +func (t *NewestStateTracker[StateT]) NewestState() *models.State[StateT] { + return (*models.State[StateT])(t.newestState.Load()) +} + +// NewestPartialTcTracker tracks the newest partial TC (by rank) in a +// concurrency safe way. +type NewestPartialTcTracker struct { + newestPartialTc *atomic.UnsafePointer +} + +func NewNewestPartialTcTracker() *NewestPartialTcTracker { + tracker := &NewestPartialTcTracker{ + newestPartialTc: atomic.NewUnsafePointer(unsafe.Pointer(nil)), + } + return tracker +} + +// Track updates local state of newestPartialTc if the provided instance is +// newer (by rank). Concurrency safe. +func (t *NewestPartialTcTracker) Track( + partialTc *consensus.PartialTimeoutCertificateCreated, +) bool { + // To record the newest value that we have ever seen, we need to use loop + // with CAS atomic operation to make sure that we always write the latest + // value in case of shared access to updated value. + for { + // take a snapshot + newestPartialTc := t.NewestPartialTc() + // verify that our partial TC is from a newer rank + if newestPartialTc != nil && newestPartialTc.Rank >= partialTc.Rank { + return false + } + // attempt to install new value, repeat in case of shared update. + if t.newestPartialTc.CompareAndSwap( + unsafe.Pointer(newestPartialTc), + unsafe.Pointer(partialTc), + ) { + return true + } + } +} + +// NewestPartialTc returns the newest partial TC (by rank) tracked. +// Concurrency safe. +func ( + t *NewestPartialTcTracker, +) NewestPartialTc() *consensus.PartialTimeoutCertificateCreated { + return (*consensus.PartialTimeoutCertificateCreated)(t.newestPartialTc.Load()) +} diff --git a/consensus/validator/validator.go b/consensus/validator/validator.go new file mode 100644 index 0000000..565ec17 --- /dev/null +++ b/consensus/validator/validator.go @@ -0,0 +1,560 @@ +package validator + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Validator is responsible for validating QC, State and Vote +type Validator[StateT models.Unique, VoteT models.Unique] struct { + committee consensus.Replicas + verifier consensus.Verifier[VoteT] +} + +var _ consensus.Validator[*nilUnique, *nilUnique] = (*Validator[*nilUnique, *nilUnique])(nil) + +// New creates a new Validator instance +func NewValidator[StateT models.Unique, VoteT models.Unique]( + committee consensus.Replicas, + verifier consensus.Verifier[VoteT], +) *Validator[StateT, VoteT] { + return &Validator[StateT, VoteT]{ + committee: committee, + verifier: verifier, + } +} + +// ValidateTimeoutCertificate validates the TimeoutCertificate `TC`. +// During normal operations, the following error returns are expected: +// - models.InvalidTCError if the TC is invalid +// - models.ErrRankUnknown if the TC refers unknown epoch +// +// Any other error should be treated as exception +func (v *Validator[StateT, VoteT]) ValidateTimeoutCertificate( + tc models.TimeoutCertificate, +) error { + newestQC := tc.GetLatestQuorumCert() + if newestQC == nil { + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC must include a QC but found nil"), + ) + } + + // The TC's rank cannot be smaller than the rank of the QC it contains. + // Note: we specifically allow for the TC to have the same rank as the highest + // QC. This is useful as a fallback, because it allows replicas other than the + // designated leader to also collect votes and generate a QC. + if tc.GetRank() < newestQC.GetRank() { + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC's QC cannot be newer than the TC's rank"), + ) + } + + // 1. Check if there is super-majority of votes + allParticipants, err := v.committee.IdentitiesByRank(tc.GetRank()) + if err != nil { + return fmt.Errorf( + "could not get consensus participants at rank %d: %w", + tc.GetRank(), + err, + ) + } + + signerIDs := []models.WeightedIdentity{} + sigIndices := tc.GetAggregatedSignature().GetBitmask() + totalWeight := uint64(0) + for i, member := range allParticipants { + if sigIndices[i/8]>>i%8&1 == 1 { + signerIDs = append(signerIDs, member) + totalWeight += member.Weight() + } + } + + // determine whether signers reach minimally required weight threshold for + // consensus + threshold, err := v.committee.QuorumThresholdForRank(tc.GetRank()) + if err != nil { + return fmt.Errorf( + "could not get weight threshold for rank %d: %w", + tc.GetRank(), + err, + ) + } + + if totalWeight < threshold { + return newInvalidTimeoutCertificateError(tc, fmt.Errorf( + "tc signers have insufficient weight of %d (required=%d)", + totalWeight, + threshold, + )) + } + + // Verify multi-message BLS sig of TC, by far the most expensive check + err = v.verifier.VerifyTimeoutCertificate(tc) + if err != nil { + // Considerations about other errors that `VerifyTC` could return: + // * models.InsufficientSignaturesError: we previously checked the total + // weight of all signers meets the supermajority threshold, which is a + // _positive_ number. Hence, there must be at least one signer. Hence, + // receiving this error would be a symptom of a fatal internal bug. + switch { + case models.IsInvalidFormatError(err): + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC's signature data has an invalid structure: %w", err), + ) + case errors.Is(err, models.ErrInvalidSignature): + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf("TC contains invalid signature(s): %w", err), + ) + default: + return fmt.Errorf( + "cannot verify tc's aggregated signature (tc.Rank: %d): %w", + tc.GetRank(), + err, + ) + } + } + + // verifying that tc.NewestQC is the QC with the highest rank. + // Note: A byzantine TC could include `nil` for tc.NewestQCRanks, in which + // case `tc.NewestQCRanks[0]` would panic. Though, per API specification + // `verifier.VerifyTC(…)` should return a `models.InvalidFormatError` if + // `signers` and `tc.NewestQCRanks` have different length. Hence, the + // following code is safe only if it is executed + // 1. _after_ checking the quorum threshold (thereby we guarantee that + // `signers` is not empty); and + // 2. _after_ `verifier.VerifyTC(…)`, which enforces that `signers` and + // `tc.NewestQCRanks` have identical length. + // Only then we can be sure that `tc.NewestQCRanks` cannot be nil. + newestQCRank := tc.GetLatestRanks()[0] + for _, rank := range tc.GetLatestRanks() { + if newestQCRank < rank { + newestQCRank = rank + } + } + if newestQCRank > tc.GetLatestQuorumCert().GetRank() { + return newInvalidTimeoutCertificateError( + tc, + fmt.Errorf( + "included QC (rank=%d) should be equal or higher to highest contributed rank: %d", + tc.GetLatestQuorumCert().GetRank(), + newestQCRank, + ), + ) + } + + // Validate QC + err = v.ValidateQuorumCertificate(newestQC) + if err != nil { + if models.IsInvalidQuorumCertificateError(err) { + return newInvalidTimeoutCertificateError(tc, fmt.Errorf( + "invalid QC included in TC: %w", + err, + )) + } + if errors.Is(err, models.ErrRankUnknown) { + // We require each replica to be bootstrapped with a QC pointing to a + // finalized state. Consensus safety rules guarantee that a QC at least as + // new as the root QC must be contained in any TC. This is because the TC + // must include signatures from a supermajority of replicas, including at + // least one honest replica, which attest to their locally highest known + // QC. Hence, any QC included in a TC must be the root QC or newer. + // Therefore, we should know the Epoch for any QC we encounter. Receiving + // a `models.ErrRankUnknown` is conceptually impossible, i.e. a symptom of + // an internal bug or invalid bootstrapping information. + return fmt.Errorf( + "no Epoch information availalbe for QC that was included in TC; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return fmt.Errorf( + "unexpected internal error while verifying the QC included in the TC: %w", + err, + ) + } + + return nil +} + +// ValidateQuorumCertificate validates the Quorum Certificate `qc`. +// During normal operations, the following error returns are expected: +// - models.InvalidQCError if the QC is invalid +// - models.ErrRankUnknown if the QC refers unknown epoch +// +// Any other error should be treated as exception +func (v *Validator[StateT, VoteT]) ValidateQuorumCertificate( + qc models.QuorumCertificate, +) error { + // Retrieve the initial identities of consensus participants for this epoch, + // and those that signed the QC. IdentitiesByRank contains all nodes that were + // authorized to sign during this epoch. Ejection and dynamic weight + // adjustments are not taken into account here. By using an epoch-static set + // of authorized + // signers, we can check QC validity without needing all ancestor states. + allParticipants, err := v.committee.IdentitiesByRank(qc.GetRank()) + if err != nil { + return fmt.Errorf( + "could not get consensus participants at rank %d: %w", + qc.GetRank(), + err, + ) + } + + signerIDs := []models.WeightedIdentity{} + sigIndices := qc.GetAggregatedSignature().GetBitmask() + totalWeight := uint64(0) + for i, member := range allParticipants { + if sigIndices[i/8]>>i%8&1 == 1 { + signerIDs = append(signerIDs, member) + totalWeight += member.Weight() + } + } + + // determine whether signers reach minimally required weight threshold for + // consensus + threshold, err := v.committee.QuorumThresholdForRank(qc.GetRank()) + if err != nil { + return fmt.Errorf( + "could not get weight threshold for rank %d: %w", + qc.GetRank(), + err, + ) + } + if totalWeight < threshold { + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf( + "QC signers have insufficient weight of %d (required=%d)", + totalWeight, + threshold, + ), + ) + } + + // verify whether the signature bytes are valid for the QC + err = v.verifier.VerifyQuorumCertificate(qc) + if err != nil { + // Considerations about other errors that `VerifyQC` could return: + // * models.InvalidSignerError: for the time being, we assume that _every_ + // HotStuff participant is also a member of the random beacon committee. + // Consequently, `InvalidSignerError` should not occur atm. + // TODO: if the random beacon committee is a strict subset of the + // HotStuff committee, we expect `models.InvalidSignerError` here + // during normal operations. + // * models.InsufficientSignaturesError: we previously checked the total + // weight of all signers meets the supermajority threshold, which is a + // _positive_ number. Hence, there must be at least one signer. Hence, + // receiving this error would be a symptom of a fatal internal bug. + switch { + case models.IsInvalidFormatError(err): + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf("QC's signature data has an invalid structure: %w", err), + ) + case errors.Is(err, models.ErrInvalidSignature): + return newInvalidQuorumCertificateError( + qc, + fmt.Errorf("QC contains invalid signature(s): %w", err), + ) + case errors.Is(err, models.ErrRankUnknown): + // We have earlier queried the Identities for the QC's rank, which must + // have returned proper values, otherwise, we wouldn't reach this code. + // Therefore, it should be impossible for `verifier.VerifyQC` to return + // ErrRankUnknown. To avoid confusion with expected sentinel errors, we + // only preserve the error messages here, but not the error types. + return fmt.Errorf( + "internal error, as querying identities for rank %d succeeded earlier but now the rank supposedly belongs to an unknown epoch: %s", + qc.GetRank(), + err.Error(), + ) + default: + return fmt.Errorf( + "cannot verify qc's aggregated signature (qc.Identifier: %x): %w", + qc.GetSelector(), + err, + ) + } + } + + return nil +} + +// ValidateProposal validates the state proposal +// A state is considered as valid if it's a valid extension of existing forks. +// Note it doesn't check if it's conflicting with finalized state +// During normal operations, the following error returns are expected: +// - models.InvalidProposalError if the state is invalid +// - models.ErrRankUnknown if the proposal refers unknown epoch +// +// Any other error should be treated as exception +func (v *Validator[StateT, VoteT]) ValidateProposal( + proposal *models.SignedProposal[StateT, VoteT], +) error { + qc := proposal.State.ParentQuorumCertificate + state := proposal.State + + // validate the proposer's vote and get their identity + vote, err := proposal.ProposerVote() + if err != nil { + return fmt.Errorf("could not get vote from proposer vote: %w", err) + } + _, err = v.ValidateVote(vote) + if models.IsInvalidVoteError[VoteT](err) { + return models.NewInvalidProposalErrorf( + proposal, + "invalid proposer signature: %w", + err, + ) + } + if err != nil { + return fmt.Errorf( + "error verifying leader signature for state %x: %w", + state.Identifier, + err, + ) + } + + // check the proposer is the leader for the proposed state's rank + leader, err := v.committee.LeaderForRank(state.Rank) + if err != nil { + return fmt.Errorf( + "error determining leader for state %x: %w", + state.Identifier, + err, + ) + } + if leader != state.ProposerID { + return models.NewInvalidProposalErrorf( + proposal, + "proposer %s is not leader (%s) for rank %d", + state.ProposerID, + leader, + state.Rank, + ) + } + + // The State must contain a proof that the primary legitimately entered the + // respective rank. Transitioning to proposal.State.Rank is possible either by + // observing a QC or a TC for the previous round. If and only if the QC is + // _not_ for the previous round we require a TC for the previous rank to be + // present. + lastRankSuccessful := proposal.State.Rank == + proposal.State.ParentQuorumCertificate.GetRank()+1 + if !lastRankSuccessful { + // check if proposal is correctly structured + if proposal.PreviousRankTimeoutCertificate == nil { + return models.NewInvalidProposalErrorf( + proposal, + "QC in state is not for previous rank, so expecting a TC but none is included in state", + ) + } + + // check if included TC is for previous rank + if proposal.State.Rank != + proposal.PreviousRankTimeoutCertificate.GetRank()+1 { + return models.NewInvalidProposalErrorf( + proposal, + "QC in state is not for previous rank, so expecting a TC for rank %d but got TC for rank %d", + proposal.State.Rank-1, + proposal.PreviousRankTimeoutCertificate.GetRank(), + ) + } + + // Check if proposal extends either the newest QC specified in the TC, or a + // newer QC in edge cases a leader may construct a TC and QC concurrently + // such that TC contains an older QC - in these case we still want to build + // on the newest QC, so this case is allowed. + if proposal.State.ParentQuorumCertificate.GetRank() < + proposal.PreviousRankTimeoutCertificate.GetLatestQuorumCert().GetRank() { + return models.NewInvalidProposalErrorf( + proposal, + "TC in state contains a newer QC than the state itself, which is a protocol violation", + ) + } + } else if proposal.PreviousRankTimeoutCertificate != nil { + // last rank ended with QC, including TC is a protocol violation + return models.NewInvalidProposalErrorf( + proposal, + "last rank has ended with QC but proposal includes PreviousRankTimeoutCertificate", + ) + } + + // Check signatures, keep the most expensive the last to check + + // check if included QC is valid + err = v.ValidateQuorumCertificate(qc) + if err != nil { + if models.IsInvalidQuorumCertificateError(err) { + return models.NewInvalidProposalErrorf(proposal, "invalid qc included: %w", err) + } + if errors.Is(err, models.ErrRankUnknown) { + // We require each replica to be bootstrapped with a QC pointing to a + // finalized state. Therefore, receiving a `models.ErrRankUnknown` is + // conceptually impossible, i.e. a symptom of an internal bug or invalid + // bootstrapping information. + return fmt.Errorf( + "no Epoch information availalbe for QC that was included in proposal; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return fmt.Errorf("unexpected error verifying qc: %w", err) + } + + if !lastRankSuccessful { + // check if included TC is valid + err = v.ValidateTimeoutCertificate(proposal.PreviousRankTimeoutCertificate) + if err != nil { + if models.IsInvalidTimeoutCertificateError(err) { + return models.NewInvalidProposalErrorf( + proposal, + "proposals TC's is not valid: %w", + err, + ) + } + if errors.Is(err, models.ErrRankUnknown) { + // We require each replica to be bootstrapped with a QC pointing to a + // finalized state. Therefore, we should know the Epoch for any QC.Rank + // and TC.Rank we encounter. Receiving a `models.ErrRankUnknown` is + // conceptually impossible, i.e. a symptom of an internal bug or invalid + // bootstrapping information. + return fmt.Errorf( + "no Epoch information availalbe for QC that was included in TC; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return fmt.Errorf( + "unexpected internal error while verifying the TC included in state: %w", + err, + ) + } + } + + return nil +} + +// ValidateVote validates the vote and returns the identity of the voter who +// signed the vote - the vote to be validated +// During normal operations, the following error returns are expected: +// - models.InvalidVoteError for invalid votes +// - models.ErrRankUnknown if the vote refers unknown epoch +// +// Any other error should be treated as exception +func (v *Validator[StateT, VoteT]) ValidateVote(vote *VoteT) ( + *models.WeightedIdentity, + error, +) { + voter, err := v.committee.IdentityByRank( + (*vote).GetRank(), + (*vote).Identity(), + ) + if models.IsInvalidSignerError(err) { + return nil, newInvalidVoteError(vote, err) + } + if err != nil { + return nil, fmt.Errorf( + "error retrieving voter Identity at rank %d: %w", + (*vote).GetRank(), + err, + ) + } + + // check whether the signature data is valid for the vote in the hotstuff + // context + err = v.verifier.VerifyVote(vote) + if err != nil { + // Theoretically, `VerifyVote` could also return a + // `models.InvalidSignerError`. However, for the time being, we assume that + // _every_ HotStuff participant is also a member of the random beacon + // committee. Consequently, `InvalidSignerError` should not occur atm. + // TODO: if the random beacon committee is a strict subset of the HotStuff + // committee, we expect `models.InvalidSignerError` here during normal + // operations. + if models.IsInvalidFormatError(err) || + errors.Is(err, models.ErrInvalidSignature) { + return nil, newInvalidVoteError(vote, err) + } + if errors.Is(err, models.ErrRankUnknown) { + return nil, fmt.Errorf( + "no Epoch information available for vote; symptom of internal bug or invalid bootstrapping information: %s", + err.Error(), + ) + } + return nil, fmt.Errorf( + "cannot verify signature for vote (%x): %w", + (*vote).Identity(), + err, + ) + } + + return &voter, nil +} + +func newInvalidQuorumCertificateError( + qc models.QuorumCertificate, + err error, +) error { + return models.InvalidQuorumCertificateError{ + Identifier: qc.GetSelector(), + Rank: qc.GetRank(), + Err: err, + } +} + +func newInvalidTimeoutCertificateError( + tc models.TimeoutCertificate, + err error, +) error { + return models.InvalidTimeoutCertificateError{ + Rank: tc.GetRank(), + Err: err, + } +} + +func newInvalidVoteError[VoteT models.Unique](vote *VoteT, err error) error { + return models.InvalidVoteError[VoteT]{ + Vote: vote, + Err: err, + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/verification/common.go b/consensus/verification/common.go new file mode 100644 index 0000000..0566b32 --- /dev/null +++ b/consensus/verification/common.go @@ -0,0 +1,111 @@ +package verification + +import ( + "encoding/binary" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// MakeVoteMessage generates the message we have to sign in order to be able +// to verify signatures without having the full state. To that effect, each data +// structure that is signed contains the sometimes redundant rank number and +// state ID; this allows us to create the signed message and verify the signed +// message without having the full state contents. +func MakeVoteMessage(rank uint64, stateID models.Identity) []byte { + msg := []byte{} + binary.BigEndian.PutUint64(msg, rank) + msg = append(msg, stateID[:]...) + return msg +} + +// MakeTimeoutMessage generates the message we have to sign in order to be able +// to contribute to Active Pacemaker protocol. Each replica signs with the +// highest QC rank known to that replica. +func MakeTimeoutMessage(rank uint64, newestQCRank uint64) []byte { + msg := make([]byte, 16) + binary.BigEndian.PutUint64(msg[:8], rank) + binary.BigEndian.PutUint64(msg[8:], newestQCRank) + return msg +} + +// verifyAggregatedSignatureOneMessage encapsulates the logic of verifying an +// aggregated signature under the same message. Proofs of possession of all +// input keys are assumed to be valid (checked by the protocol). This logic is +// commonly used across the different implementations of `consensus.Verifier`. +// In this context, all signatures apply to states. +// Return values: +// - nil if `aggregatedSig` is valid against the public keys and message. +// - models.InsufficientSignaturesError if `pubKeys` is empty or nil. +// - models.ErrInvalidSignature if the signature is invalid against the public +// keys and message. +// - unexpected errors should be treated as symptoms of bugs or uncovered +// edge cases in the logic (i.e. as fatal) +func verifyAggregatedSignatureOneMessage( + validator consensus.SignatureAggregator, + aggregatedSig models.AggregatedSignature, + dsTag []byte, + msg []byte, // message to verify against +) error { + valid := validator.VerifySignatureRaw( + aggregatedSig.GetPublicKey(), + aggregatedSig.GetSignature(), + msg, + dsTag, + ) + if !valid { + return fmt.Errorf( + "invalid aggregated signature: %w", + models.ErrInvalidSignature, + ) + } + return nil +} + +// verifyTCSignatureManyMessages checks cryptographic validity of the TC's +// signature w.r.t. multiple messages and public keys. Proofs of possession of +// all input keys are assumed to be valid (checked by the protocol). This logic +// is commonly used across the different implementations of +// `consensus.Verifier`. It is the responsibility of the calling code to ensure +// that all `pks` are authorized, without duplicates. The caller must also make +// sure the `hasher` passed is non nil and has 128-bytes outputs. +// Return values: +// - nil if `sigData` is cryptographically valid +// - models.InsufficientSignaturesError if `pks` is empty. +// - models.InvalidFormatError if `pks`/`highQCRanks` have differing lengths +// - models.ErrInvalidSignature if a signature is invalid +// - unexpected errors should be treated as symptoms of bugs or uncovered +// edge cases in the logic (i.e. as fatal) +func verifyTCSignatureManyMessages( + validator consensus.SignatureAggregator, + pks [][]byte, + sigData []byte, + rank uint64, + highQCRanks []uint64, + dsTag []byte, +) error { + if len(pks) != len(highQCRanks) { + return models.NewInvalidFormatErrorf("public keys and highQCRanks mismatch") + } + + messages := make([][]byte, 0, len(pks)) + for i := 0; i < len(pks); i++ { + messages = append(messages, MakeTimeoutMessage(rank, highQCRanks[i])) + } + + valid := validator.VerifySignatureMultiMessage( + pks, + sigData, + messages, + dsTag, + ) + if !valid { + return fmt.Errorf( + "invalid aggregated TC signature for rank %d: %w", + rank, + models.ErrInvalidSignature, + ) + } + return nil +} diff --git a/consensus/verification/signer.go b/consensus/verification/signer.go new file mode 100644 index 0000000..cb90b28 --- /dev/null +++ b/consensus/verification/signer.go @@ -0,0 +1,120 @@ +package verification + +import ( + "context" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// Signer creates votes for the collector clusters consensus. When a +// participant votes for a state, it _always_ provide the proving signature +// as part of their vote. Signer is responsible for creating correctly +// signed proposals and votes. +type Signer[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + voter consensus.VotingProvider[StateT, VoteT, PeerIDT] +} + +var _ consensus.Signer[*nilUnique, *nilUnique] = (*Signer[*nilUnique, *nilUnique, *nilUnique])(nil) + +// NewSigner instantiates a Signer, which signs votes and +// proposals with the proving key. The generated signatures are aggregatable. +func NewSigner[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + voter consensus.VotingProvider[StateT, VoteT, PeerIDT], +) *Signer[StateT, VoteT, PeerIDT] { + + sc := &Signer[StateT, VoteT, PeerIDT]{ + voter: voter, + } + return sc +} + +// CreateVote will create a vote with a proving signature for the given state. +func (c *Signer[StateT, VoteT, PeerIDT]) CreateVote( + state *models.State[StateT], +) (*VoteT, error) { + + // create the signature data + vote, err := c.voter.SignVote(context.TODO(), state) + if err != nil { + return nil, fmt.Errorf("could not create signature: %w", err) + } + + return vote, nil +} + +// CreateTimeout will create a signed timeout state for the given rank. +func (c *Signer[StateT, VoteT, PeerIDT]) CreateTimeout( + curRank uint64, + newestQC models.QuorumCertificate, + previousRankTimeoutCert models.TimeoutCertificate, +) (*models.TimeoutState[VoteT], error) { + // create timeout state specific message + vote, err := c.voter.SignTimeoutVote( + context.TODO(), + newestQC.GetFilter(), + curRank, + newestQC.GetRank(), + ) + if err != nil { + return nil, fmt.Errorf( + "could not generate signature for timeout state at rank %d: %w", + curRank, + err, + ) + } + + timeout := &models.TimeoutState[VoteT]{ + Rank: curRank, + LatestQuorumCertificate: newestQC, + PriorRankTimeoutCertificate: previousRankTimeoutCert, + Vote: vote, + TimeoutTick: 0, + } + + return timeout, nil +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/vote_aggregator.go b/consensus/vote_aggregator.go new file mode 100644 index 0000000..60bfe72 --- /dev/null +++ b/consensus/vote_aggregator.go @@ -0,0 +1,41 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteAggregator verifies and aggregates votes to build QC. When enough votes +// have been collected, it builds a QC and send it to the EventLoop. +// VoteAggregator also detects protocol violation, including invalid votes, +// double voting etc, and notifies a HotStuff consumer for slashing. +type VoteAggregator[StateT models.Unique, VoteT models.Unique] interface { + Start(ctx context.Context) error + + // AddVote verifies and aggregates a vote. The voting state could either be + // known or unknown. If the voting state is unknown, the vote won't be + // processed until AddState is called with the state. This method can be + // called concurrently, votes will be queued and processed asynchronously. + AddVote(vote *VoteT) + + // AddState notifies the VoteAggregator that it should start processing votes + // for the given state. The input state is queued internally within the + // `VoteAggregator` and processed _asynchronously_ by the VoteAggregator's + // internal worker routines. + // CAUTION: we expect that the input state's validity has been confirmed prior + // to calling AddState, including the proposer's signature. Otherwise, + // VoteAggregator might crash or exhibit undefined behaviour. + AddState(state *models.SignedProposal[StateT, VoteT]) + + // InvalidState notifies the VoteAggregator about an invalid proposal, so that + // it can process votes for the invalid state and slash the voters. No errors + // are expected during normal operations. + InvalidState(state *models.SignedProposal[StateT, VoteT]) error + + // PruneUpToRank deletes all votes _below_ to the given rank, as well as + // related indices. We only retain and process whose rank is equal or larger + // than `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the + // previous value, the previous value is kept and the method call is a NoOp. + PruneUpToRank(rank uint64) +} diff --git a/consensus/vote_collector.go b/consensus/vote_collector.go new file mode 100644 index 0000000..ebeceea --- /dev/null +++ b/consensus/vote_collector.go @@ -0,0 +1,135 @@ +package consensus + +import ( + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteConsumer consumes all votes for one specific rank. It is registered with +// the `VoteCollector` for the respective rank. Upon registration, the +// `VoteCollector` feeds votes into the consumer in the order they are received +// (already cached votes as well as votes received in the future). Only votes +// that pass de-duplication and equivocation detection are passed on. CAUTION, +// VoteConsumer implementations must be +// - NON-BLOCKING and consume the votes without noteworthy delay, and +// - CONCURRENCY SAFE +type VoteConsumer[VoteT models.Unique] func(vote *VoteT) + +// OnQuorumCertificateCreated is a callback which will be used by VoteCollector +// to submit a QuorumCertificate when it's able to create it +type OnQuorumCertificateCreated func(models.QuorumCertificate) + +// VoteCollectorStatus indicates the VoteCollector's status +// It has three different status. +type VoteCollectorStatus int + +const ( + // VoteCollectorStatusCaching is for the status when the state has not been + // received. The vote collector in this status will cache all the votes + // without verifying them. + VoteCollectorStatusCaching VoteCollectorStatus = iota + + // VoteCollectorStatusVerifying is for the status when the state has been + // received, and is able to process all votes for it. + VoteCollectorStatusVerifying + + // VoteCollectorStatusInvalid is for the status when the state has been + // verified and is invalid. All votes to this state will be collected to slash + // the voter. + VoteCollectorStatusInvalid +) + +// VoteCollector collects votes for the same state, produces QuorumCertificate +// when enough votes are collected VoteCollector takes a callback function to +// report the event that a QuorumCertificate has been produced. +var collectorStatusNames = [...]string{"VoteCollectorStatusCaching", + "VoteCollectorStatusVerifying", + "VoteCollectorStatusInvalid"} + +func (ps VoteCollectorStatus) String() string { + if ps < 0 || int(ps) > len(collectorStatusNames) { + return "UNKNOWN" + } + return collectorStatusNames[ps] +} + +// VoteCollector collects all votes for a specified rank. On the happy path, it +// generates a QuorumCertificate when enough votes have been collected. +// The VoteCollector internally delegates the vote-format specific processing +// to the VoteProcessor. +type VoteCollector[StateT models.Unique, VoteT models.Unique] interface { + // ProcessState performs validation of state signature and processes state + // with respected collector. Calling this function will mark conflicting + // collector as stale and change state of valid collectors. It returns nil if + // the state is valid. It returns models.InvalidProposalError if state is + // invalid. It returns other error if there is exception processing the state. + ProcessState(state *models.SignedProposal[StateT, VoteT]) error + + // AddVote adds a vote to the collector. When enough votes have been added to + // produce a QuorumCertificate, the QuorumCertificate will be created + // asynchronously, and passed to EventLoop through a callback. No errors are + // expected during normal operations. + AddVote(vote *VoteT) error + + // RegisterVoteConsumer registers a VoteConsumer. Upon registration, the + // collector feeds all cached votes into the consumer in the order they + // arrived. + // CAUTION, VoteConsumer implementations must be + // * NON-BLOCKING and consume the votes without noteworthy delay, and + // * CONCURRENCY SAFE + RegisterVoteConsumer(consumer VoteConsumer[VoteT]) + + // Rank returns the rank that this instance is collecting votes for. + // This method is useful when adding the newly created vote collector to vote + // collectors map. + Rank() uint64 + + // Status returns the status of the vote collector + Status() VoteCollectorStatus +} + +// VoteProcessor processes votes. It implements the vote-format specific +// processing logic. Depending on their implementation, a VoteProcessor might +// drop votes or attempt to construct a QuorumCertificate. +type VoteProcessor[VoteT models.Unique] interface { + // Process performs processing of single vote. This function is safe to call + // from multiple goroutines. + // Expected error returns during normal operations: + // * VoteForIncompatibleBlockError - submitted vote for incompatible state + // * VoteForIncompatibleRankError - submitted vote for incompatible rank + // * models.InvalidVoteError - submitted vote with invalid signature + // * models.DuplicatedSignerError - vote from a signer whose vote was + // previously already processed + // All other errors should be treated as exceptions. + Process(vote *VoteT) error + + // Status returns the status of the vote processor + Status() VoteCollectorStatus +} + +// VerifyingVoteProcessor is a VoteProcessor that attempts to construct a +// QuorumCertificate for the given state. +type VerifyingVoteProcessor[ + StateT models.Unique, + VoteT models.Unique, +] interface { + VoteProcessor[VoteT] + + // State returns which state that will be used to collector votes for. + // Transition to VerifyingVoteCollector can occur only when we have received + // state proposal so this information has to be available. + State() *models.State[StateT] +} + +// VoteProcessorFactory is a factory that can be used to create a verifying vote +// processors for a specific proposal. Depending on factory implementation it +// will return processors for consensus or collection clusters +type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique] interface { + // Create instantiates a VerifyingVoteProcessor for processing votes for a + // specific proposal. Caller can be sure that proposal vote was successfully + // verified and processed. Expected error returns during normal operations: + // * models.InvalidProposalError - proposal has invalid proposer vote + Create(tracer TraceLogger, proposal *models.SignedProposal[StateT, VoteT]) ( + VerifyingVoteProcessor[StateT, VoteT], + error, + ) +} diff --git a/consensus/vote_collectors.go b/consensus/vote_collectors.go new file mode 100644 index 0000000..ef7f922 --- /dev/null +++ b/consensus/vote_collectors.go @@ -0,0 +1,59 @@ +package consensus + +import ( + "context" + + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// VoteCollectors is an interface which allows VoteAggregator to interact with +// collectors structured by rank. +// Implementations of this interface are responsible for state transitions of +// `VoteCollector`s and pruning of stale and outdated collectors by rank. +type VoteCollectors[StateT models.Unique, VoteT models.Unique] interface { + Start(ctx context.Context) error + + // GetOrCreateCollector retrieves the consensus.VoteCollector for the specified + // rank or creates one if none exists. + // It returns: + // - (collector, true, nil) if no collector can be found by the rank, and a + // new collector was created. + // - (collector, false, nil) if the collector can be found by the rank + // - (nil, false, error) if running into any exception creating the vote + // collector state machine + // Expected error returns during normal operations: + // * models.BelowPrunedThresholdError - in case rank is lower than last + // pruned rank + GetOrCreateCollector(rank uint64) ( + collector VoteCollector[StateT, VoteT], + created bool, + err error, + ) + + // PruneUpToRank prunes the vote collectors with ranks _below_ the given + // value, i.e. we only retain and process whose rank is equal or larger than + // `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the previous + // value, the previous value is kept and the method call is a NoOp. + PruneUpToRank(lowestRetainedRank uint64) +} + +// Workers queues and processes submitted tasks. We explicitly do not +// expose any functionality to terminate the worker pool. +type Workers interface { + // Submit enqueues a function for a worker to execute. Submit will not block + // regardless of the number of tasks submitted. Each task is immediately + // given to an available worker or queued otherwise. Tasks are processed in + // FiFO order. + Submit(task func()) +} + +// Workerpool adds the functionality to terminate the workers to the +// Workers interface. +type Workerpool interface { + Workers + + // StopWait stops the worker pool and waits for all queued tasks to + // complete. No additional tasks may be submitted, but all pending tasks are + // executed by workers before this function returns. + StopWait() +} diff --git a/consensus/voteaggregator/pending_status.go b/consensus/voteaggregator/pending_status.go new file mode 100644 index 0000000..be77691 --- /dev/null +++ b/consensus/voteaggregator/pending_status.go @@ -0,0 +1,54 @@ +package voteaggregator + +import "source.quilibrium.com/quilibrium/monorepo/consensus/models" + +// PendingVotes stores all the pending votes for different state proposals +type PendingVotes[VoteT models.Unique] struct { + // maps state ID to pending status for that state + votes map[models.Identity]*PendingStatus[VoteT] +} + +// PendingStatus keeps track of pending votes for the same state +type PendingStatus[VoteT models.Unique] struct { + // When receiving missing state, first received votes will be accumulated + orderedVotes []*VoteT + // For avoiding duplicate votes + voteMap map[models.Identity]struct{} +} + +// AddVote adds a vote as a pending vote +// returns true if it can be added to a PendingStatus successfully +// returns false otherwise +func (pv *PendingVotes[VoteT]) AddVote(vote *VoteT) bool { + status, exists := pv.votes[(*vote).Source()] + if !exists { + status = NewPendingStatus[VoteT]() + pv.votes[(*vote).Source()] = status + } + return status.AddVote(vote) +} + +// AddVote adds a vote as a pending vote +// returns false if it has been added before +// returns true otherwise +func (ps *PendingStatus[VoteT]) AddVote(vote *VoteT) bool { + _, exists := ps.voteMap[(*vote).Identity()] + if exists { + return false + } + ps.voteMap[(*vote).Identity()] = struct{}{} + ps.orderedVotes = append(ps.orderedVotes, vote) + return true +} + +// NewPendingVotes creates a PendingVotes instance +func NewPendingVotes[VoteT models.Unique]() *PendingVotes[VoteT] { + return &PendingVotes[VoteT]{ + votes: make(map[models.Identity]*PendingStatus[VoteT]), + } +} + +// NewPendingStatus creates a PendingStatus instance +func NewPendingStatus[VoteT models.Unique]() *PendingStatus[VoteT] { + return &PendingStatus[VoteT]{voteMap: make(map[models.Identity]struct{})} +} diff --git a/consensus/voteaggregator/vote_aggregator.go b/consensus/voteaggregator/vote_aggregator.go new file mode 100644 index 0000000..733bb4e --- /dev/null +++ b/consensus/voteaggregator/vote_aggregator.go @@ -0,0 +1,434 @@ +package voteaggregator + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// defaultVoteAggregatorWorkers number of workers to dispatch events for vote +// aggregators +const defaultVoteAggregatorWorkers = 8 + +// defaultVoteQueueCapacity maximum capacity of buffering unprocessed votes +const defaultVoteQueueCapacity = 1000 + +// defaultStateQueueCapacity maximum capacity of buffering unprocessed states +const defaultStateQueueCapacity = 1000 + +// VoteAggregator stores the votes and aggregates them into a QC when enough +// votes have been collected. +type VoteAggregator[StateT models.Unique, VoteT models.Unique] struct { + tracer consensus.TraceLogger + notifier consensus.VoteAggregationViolationConsumer[ + StateT, + VoteT, + ] + lowestRetainedRank atomic.Uint64 // lowest rank, for which we still process votes + collectors consensus.VoteCollectors[StateT, VoteT] + queuedMessagesNotifier chan struct{} + finalizationEventsNotifier chan struct{} + finalizedRank atomic.Uint64 // cache the last finalized rank to queue up the pruning work, and unstate the caller who's delivering the finalization event. + queuedVotes chan *VoteT + queuedStates chan *models.SignedProposal[StateT, VoteT] + wg sync.WaitGroup +} + +var _ consensus.VoteAggregator[*nilUnique, *nilUnique] = (*VoteAggregator[*nilUnique, *nilUnique])(nil) + +// NewVoteAggregator creates an instance of vote aggregator +func NewVoteAggregator[StateT models.Unique, VoteT models.Unique]( + tracer consensus.TraceLogger, + notifier consensus.VoteAggregationViolationConsumer[StateT, VoteT], + lowestRetainedRank uint64, + collectors consensus.VoteCollectors[StateT, VoteT], +) (*VoteAggregator[StateT, VoteT], error) { + + queuedVotes := make(chan *VoteT, defaultVoteQueueCapacity) + queuedStates := make( + chan *models.SignedProposal[StateT, VoteT], + defaultStateQueueCapacity, + ) + + aggregator := &VoteAggregator[StateT, VoteT]{ + tracer: tracer, + notifier: notifier, + lowestRetainedRank: atomic.Uint64{}, + finalizedRank: atomic.Uint64{}, + collectors: collectors, + queuedVotes: queuedVotes, + queuedStates: queuedStates, + queuedMessagesNotifier: make(chan struct{}, 1), + finalizationEventsNotifier: make(chan struct{}, 1), + wg: sync.WaitGroup{}, + } + + aggregator.lowestRetainedRank.Store(lowestRetainedRank) + aggregator.finalizedRank.Store(lowestRetainedRank) + + return aggregator, nil +} + +func (va *VoteAggregator[StateT, VoteT]) Start(ctx context.Context) error { + va.wg.Add(defaultVoteAggregatorWorkers + 1) + for i := 0; i < defaultVoteAggregatorWorkers; i++ { + // manager for worker routines that process inbound messages + go func() { + defer va.wg.Done() + va.queuedMessagesProcessingLoop(ctx) + }() + } + go func() { + defer va.wg.Done() + // create new context which is not connected to parent + // we need to ensure that our internal workers stop before asking + // vote collectors to stop. We want to avoid delivering events to already + // stopped vote collectors + innerCtx, cancel := context.WithCancel(context.Background()) + + // start vote collectors + err := va.collectors.Start(innerCtx) + if err != nil { + return + } + + // Handle the component lifecycle in a separate goroutine so we can capture + // any errors thrown during initialization in the main goroutine. + go func() { + select { + case <-ctx.Done(): + // wait for internal workers to stop, then signal vote collectors to + // stop + va.wg.Wait() + cancel() + } + }() + + va.finalizationProcessingLoop(ctx) + }() + return nil +} + +func (va *VoteAggregator[StateT, VoteT]) queuedMessagesProcessingLoop( + ctx context.Context, +) { + notifier := va.queuedMessagesNotifier + for { + select { + case <-ctx.Done(): + return + case <-notifier: + err := va.processQueuedMessages(ctx) + if err != nil { + va.tracer.Error( + "stopping mesage processing loop", + fmt.Errorf("internal error processing queued messages: %w", err), + ) + return + } + } + } +} + +// processQueuedMessages is a function which dispatches previously queued +// messages on worker thread. This function is called whenever we have queued +// messages ready to be dispatched. No errors are expected during normal +// operations. +func (va *VoteAggregator[StateT, VoteT]) processQueuedMessages( + ctx context.Context, +) error { + for { + select { + case <-ctx.Done(): + return nil + default: + } + + state, ok := <-va.queuedStates + if ok { + err := va.processQueuedState(state) + if err != nil { + return fmt.Errorf( + "could not process pending state %v: %w", + state.State.Identifier, + err, + ) + } + + continue + } + + vote, ok := <-va.queuedVotes + if ok { + err := va.processQueuedVote(vote) + + if err != nil { + return fmt.Errorf( + "could not process pending vote %v for state %v: %w", + (*vote).Identity(), + (*vote).Source(), + err, + ) + } + + continue + } + + // when there is no more messages in the queue, back to the loop to wait + // for the next incoming message to arrive. + return nil + } +} + +// processQueuedVote performs actual processing of queued votes, this method is +// called from multiple concurrent goroutines. +func (va *VoteAggregator[StateT, VoteT]) processQueuedVote(vote *VoteT) error { + collector, created, err := va.collectors.GetOrCreateCollector( + (*vote).GetRank(), + ) + if err != nil { + // ignore if our routine is outdated and some other one has pruned + // collectors + if models.IsBelowPrunedThresholdError(err) { + return nil + } + return fmt.Errorf( + "could not get collector for rank %d: %w", + (*vote).GetRank(), + err, + ) + } + if created { + va.tracer.Trace("vote collector is created by processing vote") + } + + err = collector.AddVote(vote) + if err != nil { + return fmt.Errorf( + "could not process vote for rank %d, stateID %v: %w", + (*vote).GetRank(), + (*vote).Source(), + err, + ) + } + + va.tracer.Trace("vote has been processed successfully") + + return nil +} + +// processQueuedState performs actual processing of queued state proposals, this +// method is called from multiple concurrent goroutines. +// CAUTION: we expect that the input state's validity has been confirmed prior +// to calling AddState, including the proposer's signature. Otherwise, +// VoteAggregator might crash or exhibit undefined behaviour. No errors are +// expected during normal operation. +func (va *VoteAggregator[StateT, VoteT]) processQueuedState( + state *models.SignedProposal[StateT, VoteT], +) error { + // check if the state is for a rank that has already been pruned (and is thus + // stale) + if state.State.Rank < va.lowestRetainedRank.Load() { + return nil + } + + collector, created, err := va.collectors.GetOrCreateCollector( + state.State.Rank, + ) + if err != nil { + if models.IsBelowPrunedThresholdError(err) { + return nil + } + return fmt.Errorf( + "could not get or create collector for state %v: %w", + state.State.Identifier, + err, + ) + } + if created { + va.tracer.Trace("vote collector is created by processing state") + } + + err = collector.ProcessState(state) + if err != nil { + if models.IsInvalidProposalError[StateT, VoteT](err) { + // We are attempting process a state which is invalid + // This should never happen, because any component that feeds states into + // VoteAggregator needs to make sure that it's submitting for processing + // ONLY valid states. + return fmt.Errorf( + "received invalid state for processing %v at rank %d", + state.State.Identifier, + state.State.Rank, + ) + } + return fmt.Errorf( + "could not process state: %v, %w", + state.State.Identifier, + err, + ) + } + + va.tracer.Trace("state has been processed successfully") + + return nil +} + +// AddVote checks if vote is stale and appends vote into processing queue +// actual vote processing will be called in other dispatching goroutine. +func (va *VoteAggregator[StateT, VoteT]) AddVote(vote *VoteT) { + // drop stale votes + if (*vote).GetRank() < va.lowestRetainedRank.Load() { + va.tracer.Trace("drop stale votes") + return + } + + // It's ok to silently drop votes in case our processing pipeline is full. + // It means that we are probably catching up. + select { + case va.queuedVotes <- vote: + va.queuedMessagesNotifier <- struct{}{} + default: + va.tracer.Trace("no queue capacity, dropping vote") + } +} + +// AddState notifies the VoteAggregator that it should start processing votes +// for the given state. The input state is queued internally within the +// `VoteAggregator` and processed _asynchronously_ by the VoteAggregator's +// internal worker routines. +// CAUTION: we expect that the input state's validity has been confirmed prior +// to calling AddState, including the proposer's signature. Otherwise, +// VoteAggregator might crash or exhibit undefined behaviour. +func (va *VoteAggregator[StateT, VoteT]) AddState( + state *models.SignedProposal[StateT, VoteT], +) { + // It's ok to silently drop states in case our processing pipeline is full. + // It means that we are probably catching up. + select { + case va.queuedStates <- state: + va.queuedMessagesNotifier <- struct{}{} + default: + va.tracer.Trace(fmt.Sprintf( + "dropping state %x because queue is full", + state.State.Identifier, + )) + } +} + +// InvalidState notifies the VoteAggregator about an invalid proposal, so that +// it can process votes for the invalid state and slash the voters. +// No errors are expected during normal operations +func (va *VoteAggregator[StateT, VoteT]) InvalidState( + proposal *models.SignedProposal[StateT, VoteT], +) error { + slashingVoteConsumer := func(vote *VoteT) { + if proposal.State.Identifier == (*vote).Source() { + va.notifier.OnVoteForInvalidStateDetected(vote, proposal) + } + } + + state := proposal.State + collector, _, err := va.collectors.GetOrCreateCollector(state.Rank) + if err != nil { + // ignore if our routine is outdated and some other one has pruned + // collectors + if models.IsBelowPrunedThresholdError(err) { + return nil + } + return fmt.Errorf( + "could not retrieve vote collector for rank %d: %w", + state.Rank, + err, + ) + } + + // registering vote consumer will deliver all previously cached votes in + // strict order and will keep delivering votes if more are collected + collector.RegisterVoteConsumer(slashingVoteConsumer) + return nil +} + +// PruneUpToRank deletes all votes _below_ to the given rank, as well as +// related indices. We only retain and process whose rank is equal or larger +// than `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the +// previous value, the previous value is kept and the method call is a NoOp. +func (va *VoteAggregator[StateT, VoteT]) PruneUpToRank( + lowestRetainedRank uint64, +) { + if va.lowestRetainedRank.Load() < lowestRetainedRank { + va.lowestRetainedRank.Store(lowestRetainedRank) + va.collectors.PruneUpToRank(lowestRetainedRank) + } +} + +// OnFinalizedState implements the `OnFinalizedState` callback from the +// `consensus.FinalizationConsumer`. It informs sealing.Core about finalization +// of respective state. +// +// CAUTION: the input to this callback is treated as trusted; precautions should +// be taken that messages from external nodes cannot be considered as inputs to +// this function +func (va *VoteAggregator[StateT, VoteT]) OnFinalizedState( + state *models.State[StateT], +) { + if va.finalizedRank.Load() < state.Rank { + va.finalizedRank.Store(state.Rank) + va.finalizationEventsNotifier <- struct{}{} + } +} + +// finalizationProcessingLoop is a separate goroutine that performs processing +// of finalization events +func (va *VoteAggregator[StateT, VoteT]) finalizationProcessingLoop( + ctx context.Context, +) { + finalizationNotifier := va.finalizationEventsNotifier + for { + select { + case <-ctx.Done(): + return + case <-finalizationNotifier: + va.PruneUpToRank(va.finalizedRank.Load()) + } + } +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/voteaggregator/vote_collectors.go b/consensus/voteaggregator/vote_collectors.go new file mode 100644 index 0000000..9644e14 --- /dev/null +++ b/consensus/voteaggregator/vote_collectors.go @@ -0,0 +1,165 @@ +package voteaggregator + +import ( + "context" + "fmt" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// NewCollectorFactoryMethod is a factory method to generate a VoteCollector for +// concrete rank +type NewCollectorFactoryMethod[StateT models.Unique, VoteT models.Unique] = func( + rank uint64, + workers consensus.Workers, +) (consensus.VoteCollector[StateT, VoteT], error) + +// VoteCollectors implements management of multiple vote collectors indexed by +// rank. Implements consensus.VoteCollectors interface. Creating a VoteCollector +// for a particular rank is lazy (instances are created on demand). +// This structure is concurrency safe. +type VoteCollectors[StateT models.Unique, VoteT models.Unique] struct { + tracer consensus.TraceLogger + lock sync.RWMutex + lowestRetainedRank uint64 // lowest rank, for which we still retain a VoteCollector and process votes + collectors map[uint64]consensus.VoteCollector[StateT, VoteT] // rank -> VoteCollector + workerPool consensus.Workerpool // for processing votes that are already cached in VoteCollectors and waiting for respective state + createCollector NewCollectorFactoryMethod[StateT, VoteT] // factory method for creating collectors +} + +var _ consensus.VoteCollectors[*nilUnique, *nilUnique] = (*VoteCollectors[*nilUnique, *nilUnique])(nil) + +func NewVoteCollectors[StateT models.Unique, VoteT models.Unique]( + ctx context.Context, + tracer consensus.TraceLogger, + lowestRetainedRank uint64, + workerPool consensus.Workerpool, + factoryMethod NewCollectorFactoryMethod[StateT, VoteT], +) *VoteCollectors[StateT, VoteT] { + go func() { + <-ctx.Done() // wait for parent context to signal shutdown + workerPool.StopWait() // wait till all workers exit + }() + + return &VoteCollectors[StateT, VoteT]{ + tracer: tracer, + lowestRetainedRank: lowestRetainedRank, + collectors: make(map[uint64]consensus.VoteCollector[StateT, VoteT]), + workerPool: workerPool, + createCollector: factoryMethod, + } +} + +// GetOrCreateCollector retrieves the consensus.VoteCollector for the specified +// rank or creates one if none exists. +// - (collector, true, nil) if no collector can be found by the rank, and a +// new collector was created. +// - (collector, false, nil) if the collector can be found by the rank +// - (nil, false, error) if running into any exception creating the vote +// collector state machine +// +// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError - in case rank is lower than +// lowestRetainedRank +func (v *VoteCollectors[StateT, VoteT]) GetOrCreateCollector(rank uint64) ( + consensus.VoteCollector[StateT, VoteT], + bool, + error, +) { + cachedCollector, hasCachedCollector, err := v.getCollector(rank) + if err != nil { + return nil, false, err + } + + if hasCachedCollector { + return cachedCollector, false, nil + } + + collector, err := v.createCollector(rank, v.workerPool) + if err != nil { + return nil, false, fmt.Errorf( + "could not create vote collector for rank %d: %w", + rank, + err, + ) + } + + // Initial check showed that there was no collector. However, it's possible + // that after the initial check but before acquiring the lock to add the + // newly-created collector, another goroutine already added the needed + // collector. Hence, check again after acquiring the lock: + v.lock.Lock() + defer v.lock.Unlock() + + clr, found := v.collectors[rank] + if found { + return clr, false, nil + } + + v.collectors[rank] = collector + return collector, true, nil +} + +// getCollector retrieves consensus.VoteCollector from local cache in +// concurrency safe way. Performs check for lowestRetainedRank. +// Expected error returns during normal operations: +// - models.BelowPrunedThresholdError - in case rank is lower than +// lowestRetainedRank +func (v *VoteCollectors[StateT, VoteT]) getCollector(rank uint64) ( + consensus.VoteCollector[StateT, VoteT], + bool, + error, +) { + v.lock.RLock() + defer v.lock.RUnlock() + if rank < v.lowestRetainedRank { + return nil, false, models.NewBelowPrunedThresholdErrorf( + "cannot retrieve collector for pruned rank %d (lowest retained rank %d)", + rank, + v.lowestRetainedRank, + ) + } + + clr, found := v.collectors[rank] + + return clr, found, nil +} + +// PruneUpToRank prunes the vote collectors with ranks _below_ the given value, +// i.e. we only retain and process whose rank is equal or larger than +// `lowestRetainedRank`. If `lowestRetainedRank` is smaller than the previous +// value, the previous value is kept and the method call is a NoOp. +func (v *VoteCollectors[StateT, VoteT]) PruneUpToRank( + lowestRetainedRank uint64, +) { + v.lock.Lock() + defer v.lock.Unlock() + if v.lowestRetainedRank >= lowestRetainedRank { + return + } + if len(v.collectors) == 0 { + v.lowestRetainedRank = lowestRetainedRank + return + } + + // to optimize the pruning of large rank-ranges, we compare: + // * the number of ranks for which we have collectors: len(v.collectors) + // * the number of ranks that need to be pruned: rank-v.lowestRetainedRank + // We iterate over the dimension which is smaller. + if uint64(len(v.collectors)) < lowestRetainedRank-v.lowestRetainedRank { + for w := range v.collectors { + if w < lowestRetainedRank { + delete(v.collectors, w) + } + } + } else { + for w := v.lowestRetainedRank; w < lowestRetainedRank; w++ { + delete(v.collectors, w) + } + } + + v.lowestRetainedRank = lowestRetainedRank + v.tracer.Trace("pruned vote collectors") +} diff --git a/consensus/votecollector/common.go b/consensus/votecollector/common.go new file mode 100644 index 0000000..e58545b --- /dev/null +++ b/consensus/votecollector/common.go @@ -0,0 +1,68 @@ +package votecollector + +import ( + "errors" + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var ( + VoteForIncompatibleRankError = errors.New("vote for incompatible rank") + VoteForIncompatibleStateError = errors.New("vote for incompatible state") +) + +/******************************* NoopProcessor *******************************/ + +// NoopProcessor implements consensus.VoteProcessor. It drops all votes. +type NoopProcessor[VoteT models.Unique] struct { + status consensus.VoteCollectorStatus +} + +func NewNoopCollector[VoteT models.Unique]( + status consensus.VoteCollectorStatus, +) *NoopProcessor[VoteT] { + return &NoopProcessor[VoteT]{status} +} + +func (c *NoopProcessor[VoteT]) Process(*VoteT) error { + return nil +} + +func (c *NoopProcessor[VoteT]) Status() consensus.VoteCollectorStatus { + return c.status +} + +/************************ enforcing vote is for state ************************/ + +// EnsureVoteForState verifies that the vote is for the given state. +// Returns nil on success and sentinel errors: +// - models.VoteForIncompatibleRankError if the vote is from a different rank +// than state +// - models.VoteForIncompatibleStateError if the vote is from the same rank as +// state but for a different stateID +func EnsureVoteForState[StateT models.Unique, VoteT models.Unique]( + vote *VoteT, + state *StateT, +) error { + if (*vote).GetRank() != (*state).GetRank() { + return fmt.Errorf( + "vote %v has rank %d while state's rank is %d: %w ", + (*vote).Identity(), + (*vote).GetRank(), + (*state).GetRank(), + VoteForIncompatibleRankError, + ) + } + if (*vote).Source() != (*state).Identity() { + return fmt.Errorf( + "expecting only votes for state %v, but vote %v is for state %v: %w ", + (*state).Identity(), + (*vote).Identity(), + (*vote).Source(), + VoteForIncompatibleStateError, + ) + } + return nil +} diff --git a/consensus/votecollector/factory.go b/consensus/votecollector/factory.go new file mode 100644 index 0000000..95ccc1d --- /dev/null +++ b/consensus/votecollector/factory.go @@ -0,0 +1,157 @@ +package votecollector + +import ( + "fmt" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +// baseFactory instantiates VerifyingVoteProcessors. Depending on the specific +// signing scheme, a different baseFactory can be used. +// CAUTION: the baseFactory creates the VerifyingVoteProcessor for the given +// state. It does _not_ check the proposer's vote for its own state. The API +// reflects this by expecting a `models.State` as input (which does _not_ +// contain the proposer vote) as opposed to `models.SignedProposal` (combines +// state with proposer's vote). Therefore, baseFactory does _not_ implement +// `consensus.VoteProcessorFactory` by itself. The VoteProcessorFactory adds the +// missing logic to verify the proposer's vote, by wrapping the baseFactory +// (decorator pattern). +type baseFactory[StateT models.Unique, VoteT models.Unique] func( + tracer consensus.TraceLogger, + state *models.State[StateT], +) ( + consensus.VerifyingVoteProcessor[StateT, VoteT], + error, +) + +// VoteProcessorFactory implements `consensus.VoteProcessorFactory`. Its main +// purpose is to construct instances of VerifyingVoteProcessors for a given +// state proposal. +// VoteProcessorFactory +// * delegates the creation of the actual instances to baseFactory +// * adds the logic to verify the proposer's vote for its own state +// Thereby, VoteProcessorFactory guarantees that only proposals with valid +// proposer vote are accepted (as per API specification). Otherwise, an +// `models.InvalidProposalError` is returned. +type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique] struct { + baseFactory baseFactory[StateT, VoteT] +} + +var _ consensus.VoteProcessorFactory[*nilUnique, *nilUnique] = (*VoteProcessorFactory[*nilUnique, *nilUnique])(nil) + +// Create instantiates a VerifyingVoteProcessor for the given state proposal. +// A VerifyingVoteProcessor are only created for proposals with valid proposer +// votes. Expected error returns during normal operations: +// * models.InvalidProposalError - proposal has invalid proposer vote +func (f *VoteProcessorFactory[StateT, VoteT]) Create( + tracer consensus.TraceLogger, + proposal *models.SignedProposal[StateT, VoteT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + processor, err := f.baseFactory(tracer, proposal.State) + if err != nil { + return nil, fmt.Errorf( + "instantiating vote processor for state %v failed: %w", + proposal.State.Identifier, + err, + ) + } + + vote, err := proposal.ProposerVote() + if err != nil { + return nil, fmt.Errorf("could not get vote from proposer vote: %w", err) + } + + err = processor.Process(vote) + if err != nil { + if models.IsInvalidVoteError[VoteT](err) { + return nil, models.NewInvalidProposalErrorf( + proposal, + "invalid proposer vote: %w", + err, + ) + } + return nil, fmt.Errorf( + "processing proposer's vote for state %v failed: %w", + proposal.State.Identifier, + err, + ) + } + return processor, nil +} + +// NewVoteProcessorFactory implements consensus.VoteProcessorFactory. +func NewVoteProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + committee consensus.DynamicCommittee, + onQCCreated consensus.OnQuorumCertificateCreated, +) *VoteProcessorFactory[StateT, VoteT] { + base := &provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]{ + committee: committee, + onQCCreated: onQCCreated, + } + return &VoteProcessorFactory[StateT, VoteT]{ + baseFactory: base.Create, + } +} + +/* ***************************** VerifyingVoteProcessor constructors for bootstrapping ***************************** */ + +// NewBootstrapVoteProcessor directly creates a `VoteProcessor`, +// suitable for the collector's local cluster consensus. +// Intended use: only for bootstrapping. +// UNSAFE: the proposer vote for `state` is _not_ validated or included +func NewBootstrapVoteProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +]( + tracer consensus.TraceLogger, + committee consensus.DynamicCommittee, + state *models.State[StateT], + onQCCreated consensus.OnQuorumCertificateCreated, +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + factory := &provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]{ + committee: committee, + onQCCreated: onQCCreated, + } + return factory.Create(tracer, state) +} + +// Type used to satisfy generic arguments in compiler time type assertion check +type nilUnique struct{} + +// GetSignature implements models.Unique. +func (n *nilUnique) GetSignature() []byte { + panic("unimplemented") +} + +// GetTimestamp implements models.Unique. +func (n *nilUnique) GetTimestamp() uint64 { + panic("unimplemented") +} + +// Source implements models.Unique. +func (n *nilUnique) Source() models.Identity { + panic("unimplemented") +} + +// Clone implements models.Unique. +func (n *nilUnique) Clone() models.Unique { + panic("unimplemented") +} + +// GetRank implements models.Unique. +func (n *nilUnique) GetRank() uint64 { + panic("unimplemented") +} + +// Identity implements models.Unique. +func (n *nilUnique) Identity() models.Identity { + panic("unimplemented") +} + +var _ models.Unique = (*nilUnique)(nil) diff --git a/consensus/votecollector/statemachine.go b/consensus/votecollector/statemachine.go new file mode 100644 index 0000000..5b96648 --- /dev/null +++ b/consensus/votecollector/statemachine.go @@ -0,0 +1,354 @@ +package votecollector + +import ( + "errors" + "fmt" + "sync" + + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus/voteaggregator" +) + +var ( + ErrDifferentCollectorState = errors.New("different state") +) + +// VerifyingVoteProcessorFactory generates consensus.VerifyingVoteCollector +// instances +type VerifyingVoteProcessorFactory[ + StateT models.Unique, + VoteT models.Unique, +] = func( + tracer consensus.TraceLogger, + proposal *models.SignedProposal[StateT, VoteT], +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) + +// VoteCollector implements a state machine for transition between different +// states of vote collector +type VoteCollector[StateT models.Unique, VoteT models.Unique] struct { + sync.Mutex + log zerolog.Logger + workers consensus.Workers + notifier consensus.VoteAggregationConsumer[StateT, VoteT] + createVerifyingProcessor VerifyingVoteProcessorFactory[StateT, VoteT] + + votesCache VotesCache[VoteT] + votesProcessor atomic.Value +} + +var _ consensus.VoteCollector[*nilUnique, *nilUnique] = (*VoteCollector[*nilUnique, *nilUnique])(nil) + +func ( + m *VoteCollector[StateT, VoteT], +) atomicLoadProcessor() consensus.VoteProcessor[VoteT] { + return m.votesProcessor.Load().(*atomicValueWrapper[VoteT]).processor +} + +// atomic.Value doesn't allow storing interfaces as atomic values, +// it requires that stored type is always the same, so we need a wrapper that +// will mitigate this restriction +// https://github.com/golang/go/issues/22550 +type atomicValueWrapper[VoteT models.Unique] struct { + processor consensus.VoteProcessor[VoteT] +} + +func NewStateMachineFactory[StateT models.Unique, VoteT models.Unique]( + tracer consensus.TraceLogger, + notifier consensus.VoteAggregationConsumer[StateT, VoteT], + verifyingVoteProcessorFactory VerifyingVoteProcessorFactory[StateT, VoteT], +) voteaggregator.NewCollectorFactoryMethod[StateT, VoteT] { + return func(rank uint64, workers consensus.Workers) ( + consensus.VoteCollector[StateT, VoteT], + error, + ) { + return NewStateMachine[StateT, VoteT]( + rank, + tracer, + workers, + notifier, + verifyingVoteProcessorFactory, + ), nil + } +} + +func NewStateMachine[StateT models.Unique, VoteT models.Unique]( + rank uint64, + tracer consensus.TraceLogger, + workers consensus.Workers, + notifier consensus.VoteAggregationConsumer[StateT, VoteT], + verifyingVoteProcessorFactory VerifyingVoteProcessorFactory[StateT, VoteT], +) *VoteCollector[StateT, VoteT] { + sm := &VoteCollector[StateT, VoteT]{ + tracer: tracer + workers: workers, + notifier: notifier, + createVerifyingProcessor: verifyingVoteProcessorFactory, + votesCache: *NewVotesCache[StateT, VoteT](rank), + } + + // without a state, we don't process votes (only cache them) + sm.votesProcessor.Store(&atomicValueWrapper{ + processor: NewNoopCollector(consensus.VoteCollectorStatusCaching), + }) + return sm +} + +// AddVote adds a vote to current vote collector +// All expected errors are handled via callbacks to notifier. +// Under normal execution only exceptions are propagated to caller. +func (m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error { + // Cache vote + err := m.votesCache.AddVote(vote) + if err != nil { + if errors.Is(err, RepeatedVoteErr) { + return nil + } + doubleVoteErr, isDoubleVoteErr := models.AsDoubleVoteError(err) + if isDoubleVoteErr { + m.notifier.OnDoubleVotingDetected( + doubleVoteErr.FirstVote, + doubleVoteErr.ConflictingVote, + ) + return nil + } + return fmt.Errorf( + "internal error adding vote %v to cache for state %v: %w", + vote.ID(), + vote.Identifier, + err, + ) + } + + err = m.processVote(vote) + if err != nil { + if errors.Is(err, VoteForIncompatibleStateError) { + // For honest nodes, there should be only a single proposal per rank and + // all votes should be for this proposal. However, byzantine nodes might + // deviate from this happy path: + // * A malicious leader might create multiple (individually valid) + // conflicting proposals for the same rank. Honest replicas will send + // correct votes for whatever proposal they see first. We only accept + // the first valid state and reject any other conflicting states that + // show up later. + // * Alternatively, malicious replicas might send votes with the expected + // rank, but for states that don't exist. + // In either case, receiving votes for the same rank but for different + // state IDs is a symptom of malicious consensus participants. Hence, we + // log it here as a warning: + m.tracer.Error("received vote for incompatible state", err) + + return nil + } + return fmt.Errorf( + "internal error processing vote %v for state %v: %w", + vote.ID(), + vote.Identifier, + err, + ) + } + return nil +} + +// processVote uses compare-and-repeat pattern to process vote with underlying +// vote processor +func (m *VoteCollector[StateT, VoteT]) processVote(vote *VoteT) error { + for { + processor := m.atomicLoadProcessor() + currentState := processor.Status() + err := processor.Process(vote) + if err != nil { + if invalidVoteErr, ok := models.AsInvalidVoteError(err); ok { + m.notifier.OnInvalidVoteDetected(*invalidVoteErr) + return nil + } + // ATTENTION: due to how our logic is designed this situation is only + // possible where we receive the same vote twice, this is not a case of + // double voting. This scenario is possible if leader submits their vote + // additionally to the vote in proposal. + if models.IsDuplicatedSignerError(err) { + m.tracer.Trace(fmt.Sprintf("duplicated signer %x", vote.SignerID)) + return nil + } + return err + } + + if currentState != m.Status() { + continue + } + + m.notifier.OnVoteProcessed(vote) + return nil + } +} + +// Status returns the status of underlying vote processor +func (m *VoteCollector[StateT, VoteT]) Status() consensus.VoteCollectorStatus { + return m.atomicLoadProcessor().Status() +} + +// Rank returns rank associated with this collector +func (m *VoteCollector[StateT, VoteT]) Rank() uint64 { + return m.votesCache.Rank() +} + +// ProcessState performs validation of state signature and processes state with +// respected collector. In case we have received double proposal, we will stop +// attempting to build a QC for this rank, because we don't want to build on any +// proposal from an equivocating primary. Note: slashing challenges for proposal +// equivocation are triggered by consensus.Forks, so we don't have to do +// anything else here. +// +// The internal state change is implemented as an atomic compare-and-swap, i.e. +// the state transition is only executed if VoteCollector's internal state is +// equal to `expectedValue`. The implementation only allows the transitions +// +// CachingVotes -> VerifyingVotes +// CachingVotes -> Invalid +// VerifyingVotes -> Invalid +func (m *VoteCollector[StateT, VoteT]) ProcessState(proposal *models.SignedProposal) error { + + if proposal.State.Rank != m.Rank() { + return fmt.Errorf( + "this VoteCollector requires a proposal for rank %d but received state %v with rank %d", + m.votesCache.Rank(), + proposal.State.Identifier, + proposal.State.Rank, + ) + } + + for { + proc := m.atomicLoadProcessor() + + switch proc.Status() { + // first valid state for this rank: commence state transition from caching + // to verifying + case consensus.VoteCollectorStatusCaching: + err := m.caching2Verifying(proposal) + if errors.Is(err, ErrDifferentCollectorState) { + continue // concurrent state update by other thread => restart our logic + } + + if err != nil { + return fmt.Errorf( + "internal error updating VoteProcessor's status from %s to %s for state %v: %w", + proc.Status().String(), + consensus.VoteCollectorStatusVerifying.String(), + proposal.State.Identifier, + err, + ) + } + + m.log.Info(). + Hex("state_id", proposal.State.Identifier[:]). + Msg("vote collector status changed from caching to verifying") + + m.processCachedVotes(proposal.State) + + // We already received a valid state for this rank. Check whether the + // proposer is equivocating and terminate vote processing in this case. + // Note: proposal equivocation is handled by consensus.Forks, so we don't + // have to do anything else here. + case consensus.VoteCollectorStatusVerifying: + verifyingProc, ok := proc.(consensus.VerifyingVoteProcessor) + if !ok { + return fmt.Errorf( + "while processing state %v, found that VoteProcessor reports status %s but has an incompatible implementation type %T", + proposal.State.Identifier, + proc.Status(), + verifyingProc, + ) + } + if verifyingProc.State().Identifier != proposal.State.Identifier { + m.terminateVoteProcessing() + } + + // Vote processing for this rank has already been terminated. Note: proposal + // equivocation is handled by consensus.Forks, so we don't have anything to + // do here. + case consensus.VoteCollectorStatusInvalid: /* no op */ + + default: + return fmt.Errorf( + "while processing state %v, found that VoteProcessor reported unknown status %s", + proposal.State.Identifier, + proc.Status(), + ) + } + + return nil + } +} + +// RegisterVoteConsumer registers a VoteConsumer. Upon registration, the +// collector feeds all cached votes into the consumer in the order they arrived. +// CAUTION, VoteConsumer implementations must be +// - NON-BLOCKING and consume the votes without noteworthy delay, and +// - CONCURRENCY SAFE +func (m *VoteCollector[StateT, VoteT]) RegisterVoteConsumer( + consumer consensus.VoteConsumer[VoteT], +) { + m.votesCache.RegisterVoteConsumer(consumer) +} + +// caching2Verifying ensures that the VoteProcessor is currently in state +// `VoteCollectorStatusCaching` and replaces it by a newly-created +// VerifyingVoteProcessor. +// Error returns: +// * ErrDifferentCollectorState if the VoteCollector's state is _not_ +// `CachingVotes` +// * all other errors are unexpected and potential symptoms of internal bugs or +// state corruption (fatal) +func (m *VoteCollector[StateT, VoteT]) caching2Verifying( + proposal *models.SignedProposal[StateT, VoteT], +) error { + stateID := proposal.State.Identifier + newProc, err := m.createVerifyingProcessor(m.log, proposal) + if err != nil { + return fmt.Errorf("failed to create VerifyingVoteProcessor for state %v: %w", stateID, err) + } + newProcWrapper := &atomicValueWrapper{processor: newProc} + + m.Lock() + defer m.Unlock() + proc := m.atomicLoadProcessor() + if proc.Status() != consensus.VoteCollectorStatusCaching { + return fmt.Errorf("processors's current state is %s: %w", proc.Status().String(), ErrDifferentCollectorState) + } + m.votesProcessor.Store(newProcWrapper) + return nil +} + +func (m *VoteCollector[StateT, VoteT]) terminateVoteProcessing() { + if m.Status() == consensus.VoteCollectorStatusInvalid { + return + } + newProcWrapper := &atomicValueWrapper{ + processor: NewNoopCollector(consensus.VoteCollectorStatusInvalid), + } + + m.Lock() + defer m.Unlock() + m.votesProcessor.Store(newProcWrapper) +} + +// processCachedVotes feeds all cached votes into the VoteProcessor +func (m *VoteCollector[StateT, VoteT]) processCachedVotes(state *models.State) { + cachedVotes := m.votesCache.All() + m.log.Info().Msgf("processing %d cached votes", len(cachedVotes)) + for _, vote := range cachedVotes { + if vote.Identifier != state.Identifier { + continue + } + + stateVote := vote + voteProcessingTask := func() { + err := m.processVote(stateVote) + if err != nil { + m.log.Fatal().Err(err).Msg("internal error processing cached vote") + } + } + m.workers.Submit(voteProcessingTask) + } +} diff --git a/consensus/votecollector/vote_cache.go b/consensus/votecollector/vote_cache.go new file mode 100644 index 0000000..6ef0dae --- /dev/null +++ b/consensus/votecollector/vote_cache.go @@ -0,0 +1,149 @@ +package votecollector + +import ( + "errors" + "sync" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" +) + +var ( + // RepeatedVoteErr is emitted, when we receive a vote for the same state + // from the same voter multiple times. This error does _not_ indicate + // equivocation. + RepeatedVoteErr = errors.New("duplicated vote") +) + +// voteContainer container stores the vote and in index representing +// the order in which the votes were received +type voteContainer struct { + *models.Vote + index int +} + +// VotesCache maintains a _concurrency safe_ cache of votes for one particular +// rank. The cache memorizes the order in which the votes were received. Votes +// are de-duplicated based on the following rules: +// - Vor each voter (i.e. SignerID), we store the _first_ vote v0. +// - For any subsequent vote v, we check whether v.Identifier == v0.Identifier. +// If this is the case, we consider the vote a duplicate and drop it. +// If v and v0 have different Identifiers, the voter is equivocating and +// we return a models.DoubleVoteError +type VotesCache[VoteT models.Unique] struct { + lock sync.RWMutex + rank uint64 + votes map[models.Identity]voteContainer // signerID -> first vote + voteConsumers []consensus.VoteConsumer[VoteT] +} + +// NewVotesCache instantiates a VotesCache for the given rank +func NewVotesCache[VoteT models.Unique](rank uint64) *VotesCache[VoteT] { + return &VotesCache[VoteT]{ + rank: rank, + votes: make(map[models.Identity]voteContainer), + } +} + +func (vc *VotesCache[VoteT]) Rank() uint64 { return vc.rank } + +// AddVote stores a vote in the cache. The following errors are expected during +// normal operations: +// - nil: if the vote was successfully added +// - models.DoubleVoteError is returned if the voter is equivocating +// (i.e. voting in the same rank for different states). +// - RepeatedVoteErr is returned when adding a vote for the same state from +// the same voter multiple times. +// - IncompatibleRankErr is returned if the vote is for a different rank. +// +// When AddVote returns an error, the vote is _not_ stored. +func (vc *VotesCache[VoteT]) AddVote(vote *VoteT) error { + if (*vote).GetRank() != vc.rank { + return VoteForIncompatibleRankError + } + vc.lock.Lock() + defer vc.lock.Unlock() + + // De-duplicated votes based on the following rules: + // * Vor each voter (i.e. SignerID), we store the _first_ vote v0. + // * For any subsequent vote v, we check whether + // v.Identifier == v0.Identifier. + // If this is the case, we consider the vote a duplicate and drop it. + // If v and v0 have different Identifiers, the voter is equivocating and + // we return a models.DoubleVoteError + firstVote, exists := vc.votes[(*vote).Identity()] + if exists { + if (*firstVote.Vote).Source() != (*vote).Source() { + return models.NewDoubleVoteErrorf( + firstVote.Vote, + vote, + "detected vote equivocation at rank: %d", + vc.rank, + ) + } + return RepeatedVoteErr + } + + // previously unknown vote: (1) store and (2) forward to consumers + vc.votes[(*vote).Identity()] = voteContainer{vote, len(vc.votes)} + for _, consumer := range vc.voteConsumers { + consumer(vote) + } + return nil +} + +// GetVote returns the stored vote for the given `signerID`. Returns: +// - (vote, true) if a vote from signerID is known +// - (false, nil) no vote from signerID is known +func (vc *VotesCache[VoteT]) GetVote(signerID models.Identity) (*VoteT, bool) { + vc.lock.RLock() + container, exists := vc.votes[signerID] // if signerID is unknown, its `Vote` pointer is nil + vc.lock.RUnlock() + return container.Vote, exists +} + +// Size returns the number of cached votes +func (vc *VotesCache[VoteT]) Size() int { + vc.lock.RLock() + s := len(vc.votes) + vc.lock.RUnlock() + return s +} + +// RegisterVoteConsumer registers a VoteConsumer. Upon registration, the cache +// feeds all cached votes into the consumer in the order they arrived. +// CAUTION: a consumer _must_ be non-stateing and consume the votes without +// noteworthy delay. Otherwise, consensus speed is impacted. +// +// Expected usage patter: During happy-path operations, the state arrives in a +// timely manner. Hence, we expect that only a few votes are cached when a +// consumer is registered. For the purpose of forensics, we might register a +// consumer later, when already lots of votes are cached. However, this should +// be a rare occurrence (we except moderate performance overhead in this case). +func (vc *VotesCache[VoteT]) RegisterVoteConsumer( + consumer consensus.VoteConsumer[VoteT], +) { + vc.lock.Lock() + defer vc.lock.Unlock() + + vc.voteConsumers = append(vc.voteConsumers, consumer) + for _, vote := range vc.all() { // feed the consumer with the cached votes + consumer(vote) // non-stateing per API contract + } +} + +// All returns all currently cached votes. Concurrency safe. +func (vc *VotesCache[VoteT]) All() []*VoteT { + vc.lock.Lock() + defer vc.lock.Unlock() + return vc.all() +} + +// all returns all currently cached votes. NOT concurrency safe +func (vc *VotesCache[VoteT]) all() []*VoteT { + orderedVotes := make([]*VoteT, len(vc.votes)) + for _, v := range vc.votes { + orderedVotes[v.index] = v.Vote + } + return orderedVotes +} diff --git a/consensus/votecollector/vote_processor.go b/consensus/votecollector/vote_processor.go new file mode 100644 index 0000000..da213c3 --- /dev/null +++ b/consensus/votecollector/vote_processor.go @@ -0,0 +1,238 @@ +package votecollector + +import ( + "context" + "errors" + "fmt" + + "go.uber.org/atomic" + + "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" + "source.quilibrium.com/quilibrium/monorepo/consensus/signature" + "source.quilibrium.com/quilibrium/monorepo/consensus/verification" +) + +/* ***************** Base-Factory for VoteProcessor ****************** */ + +// provingVoteProcessorFactoryBase implements a factory for creating +// VoteProcessor holds needed dependencies to initialize VoteProcessor. +// CAUTION: +// this base factory only creates the VerifyingVoteProcessor for the given +// state. It does _not_ check the proposer's vote for its own state, i.e. it +// does _not_ implement `consensus.VoteProcessorFactory`. This base factory +// should be wrapped by `votecollector.VoteProcessorFactory` which adds the +// logic to verify the proposer's vote (decorator pattern). +type provingVoteProcessorFactoryBase[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + committee consensus.DynamicCommittee + onQCCreated consensus.OnQuorumCertificateCreated +} + +// Create creates VoteProcessor for processing votes for the given state. +// Caller must treat all errors as exceptions +func (f *provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]) Create( + tracer consensus.TraceLogger, + state *models.State[StateT], + dsTag []byte, + aggregator consensus.SignatureAggregator, +) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) { + allParticipants, err := f.committee.IdentitiesByState(state.Identifier) + if err != nil { + return nil, fmt.Errorf("error retrieving consensus participants: %w", err) + } + + // message that has to be verified against aggregated signature + msg := verification.MakeVoteMessage(state.Rank, state.Identifier) + + // prepare the proving public keys of participants + provingKeys := make([][]byte, 0, len(allParticipants)) + for _, participant := range allParticipants { + provingKeys = append(provingKeys, participant.PublicKey()) + } + + provingSigAggtor, err := signature.NewWeightedSignatureAggregator( + allParticipants, + provingKeys, + msg, + dsTag, + aggregator, + ) + if err != nil { + return nil, fmt.Errorf( + "could not create aggregator for proving signatures: %w", + err, + ) + } + + minRequiredWeight, err := f.committee.QuorumThresholdForRank(state.Rank) + if err != nil { + return nil, fmt.Errorf( + "could not get weight threshold for rank %d: %w", + state.Rank, + err, + ) + } + + return &VoteProcessor[StateT, VoteT, PeerIDT]{ + tracer: tracer, + state: state, + provingSigAggtor: provingSigAggtor, + onQCCreated: f.onQCCreated, + minRequiredWeight: minRequiredWeight, + done: *atomic.NewBool(false), + allParticipants: allParticipants, + }, nil +} + +/* ****************** VoteProcessor Implementation ******************* */ + +// VoteProcessor implements the consensus.VerifyingVoteProcessor interface. +// It processes hotstuff votes from a collector cluster, where participants vote +// in favour of a state by proving their proving key signature. +// Concurrency safe. +type VoteProcessor[ + StateT models.Unique, + VoteT models.Unique, + PeerIDT models.Unique, +] struct { + tracer consensus.TraceLogger + state *models.State[StateT] + provingSigAggtor consensus.WeightedSignatureAggregator + onQCCreated consensus.OnQuorumCertificateCreated + votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT] + minRequiredWeight uint64 + done atomic.Bool + allParticipants []models.WeightedIdentity +} + +// State returns state that is part of proposal that we are processing votes for. +func (p *VoteProcessor[StateT, VoteT, PeerIDT]) State() *models.State[StateT] { + return p.state +} + +// Status returns status of this vote processor, it's always verifying. +func (p *VoteProcessor[ + StateT, + VoteT, + PeerIDT, +]) Status() consensus.VoteCollectorStatus { + return consensus.VoteCollectorStatusVerifying +} + +// Process performs processing of single vote in concurrent safe way. This +// function is implemented to be called by multiple goroutines at the same time. +// Supports processing of both proving and threshold signatures. Design of this +// function is event driven, as soon as we collect enough weight to create a QC +// we will immediately do this and submit it via callback for further +// processing. +// Expected error returns during normal operations: +// * VoteForIncompatibleStateError - submitted vote for incompatible state +// * VoteForIncompatibleRankError - submitted vote for incompatible rank +// * models.InvalidVoteError - submitted vote with invalid signature +// All other errors should be treated as exceptions. +func (p *VoteProcessor[StateT, VoteT, PeerIDT]) Process(vote *VoteT) error { + err := EnsureVoteForState[StateT, VoteT](vote, p.state) + if err != nil { + return fmt.Errorf("received incompatible vote: %w", err) + } + + // Vote Processing state machine + if p.done.Load() { + return nil + } + err = p.provingSigAggtor.Verify((*vote).Identity(), (*vote).GetSignature()) + if err != nil { + if models.IsInvalidSignerError(err) { + return models.NewInvalidVoteErrorf( + vote, + "vote %x for rank %d is not signed by an authorized consensus participant: %w", + (*vote).Identity(), + (*vote).GetRank(), + err, + ) + } + if errors.Is(err, models.ErrInvalidSignature) { + return models.NewInvalidVoteErrorf( + vote, + "vote %x for rank %d has an invalid proving signature: %w", + (*vote).Identity(), + (*vote).GetRank(), + err, + ) + } + return fmt.Errorf("internal error checking signature validity: %w", err) + } + + if p.done.Load() { + return nil + } + totalWeight, err := p.provingSigAggtor.TrustedAdd( + (*vote).Identity(), + (*vote).GetSignature(), + ) + if err != nil { + // we don't expect any errors here during normal operation, as we previously + // checked for duplicated votes from the same signer and verified the + // signer+signature + return fmt.Errorf( + "unexpected exception adding signature from vote %x to proving aggregator: %w", + (*vote).Identity(), + err, + ) + } + + p.tracer.Trace(fmt.Sprintf( + "processed vote, total weight=(%d), required=(%d)", + totalWeight, + p.minRequiredWeight, + )) + + // checking of conditions for building QC are satisfied + if totalWeight < p.minRequiredWeight { + return nil + } + + // At this point, we have enough signatures to build a QC. Another routine + // might just be at this point. To avoid duplicate work, only one routine can + // pass: + if !p.done.CompareAndSwap(false, true) { + return nil + } + qc, err := p.buildQC() + if err != nil { + return fmt.Errorf("internal error constructing QC from votes: %w", err) + } + + p.tracer.Trace("new QC has been created") + p.onQCCreated(qc) + + return nil +} + +// buildQC performs aggregation of signatures when we have collected enough +// weight for building QC. This function is run only once by single worker. +// Any error should be treated as exception. +func (p *VoteProcessor[StateT, VoteT, PeerIDT]) buildQC() ( + models.QuorumCertificate, + error, +) { + _, aggregatedSig, err := p.provingSigAggtor.Aggregate() + if err != nil { + return nil, fmt.Errorf("could not aggregate proving signature: %w", err) + } + + qc, err := p.votingProvider.FinalizeQuorumCertificate( + context.Background(), + p.state, + aggregatedSig, + ) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil +} diff --git a/go-libp2p-blossomsub/bitmask_test.go b/go-libp2p-blossomsub/bitmask_test.go index c95c3e9..49adab7 100644 --- a/go-libp2p-blossomsub/bitmask_test.go +++ b/go-libp2p-blossomsub/bitmask_test.go @@ -710,7 +710,7 @@ func notifSubThenUnSub(ctx context.Context, t *testing.T, bitmasks []*Bitmask) { } // Wait for the unsubscribe messages to reach the primary peer - for len(primaryBitmask.ListPeers()) < 0 { + for len(primaryBitmask.ListPeers()) != 0 { time.Sleep(time.Millisecond * 100) } diff --git a/go.mod b/go.mod deleted file mode 100644 index 53b7abd..0000000 --- a/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module source.quilibrium.com/quilibrium/monorepo - -go 1.18 - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index d821b09..0000000 --- a/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/node/consensus/app/consensus_leader_provider.go b/node/consensus/app/consensus_leader_provider.go index ea82012..7ad8c35 100644 --- a/node/consensus/app/consensus_leader_provider.go +++ b/node/consensus/app/consensus_leader_provider.go @@ -1,6 +1,7 @@ package app import ( + "bytes" "context" "encoding/hex" "time" @@ -9,6 +10,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -72,6 +74,28 @@ func (p *AppLeaderProvider) ProveNextState( return nil, errors.Wrap(errors.New("nil prior frame"), "prove next state") } + // Get prover index + provers, err := p.engine.proverRegistry.GetActiveProvers(p.engine.appAddress) + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, errors.Wrap(err, "prove next state") + } + + found := false + for _, prover := range provers { + if bytes.Equal(prover.Address, p.engine.getProverAddress()) { + found = true + break + } + } + + if !found { + return nil, errors.Wrap( + models.NewNoVoteErrorf("not a prover"), + "prove next state", + ) + } + // Get collected messages to include in frame p.engine.pendingMessagesMu.RLock() messages := make([]*protobufs.Message, len(p.engine.collectedMessages[string( diff --git a/node/consensus/app/consensus_types.go b/node/consensus/app/consensus_types.go index 69af852..b8c6772 100644 --- a/node/consensus/app/consensus_types.go +++ b/node/consensus/app/consensus_types.go @@ -6,6 +6,7 @@ import ( "slices" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" ) // Type aliases for consensus types @@ -21,7 +22,7 @@ func (p PeerID) Rank() uint64 { return 0 } -func (p PeerID) Clone() consensus.Unique { +func (p PeerID) Clone() models.Unique { return PeerID{ ID: slices.Clone(p.ID), } @@ -48,7 +49,7 @@ func (c CollectedCommitments) Rank() uint64 { return c.frameNumber } -func (c CollectedCommitments) Clone() consensus.Unique { +func (c CollectedCommitments) Clone() models.Unique { return CollectedCommitments{ frameNumber: c.frameNumber, commitmentHash: slices.Clone(c.commitmentHash), diff --git a/node/consensus/global/consensus_leader_provider.go b/node/consensus/global/consensus_leader_provider.go index e27a16d..8725a07 100644 --- a/node/consensus/global/consensus_leader_provider.go +++ b/node/consensus/global/consensus_leader_provider.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" "source.quilibrium.com/quilibrium/monorepo/protobufs" ) @@ -72,6 +73,30 @@ func (p *GlobalLeaderProvider) ProveNextState( return nil, errors.Wrap(errors.New("nil prior frame"), "prove next state") } + // Get prover index + provers, err := p.engine.proverRegistry.GetActiveProvers(nil) + if err != nil { + frameProvingTotal.WithLabelValues("error").Inc() + return nil, errors.Wrap(err, "prove next state") + } + + proverIndex := uint8(0) + found := false + for i, prover := range provers { + if bytes.Equal(prover.Address, p.engine.getProverAddress()) { + proverIndex = uint8(i) + found = true + break + } + } + + if !found { + return nil, errors.Wrap( + models.NewNoVoteErrorf("not a prover"), + "prove next state", + ) + } + p.engine.logger.Info( "proving next global state", zap.Uint64("frame_number", (*prior).Header.FrameNumber+1), @@ -103,21 +128,6 @@ func (p *GlobalLeaderProvider) ProveNextState( p.engine.currentDifficulty = uint32(difficulty) p.engine.currentDifficultyMu.Unlock() - // Get prover index - provers, err := p.engine.proverRegistry.GetActiveProvers(nil) - if err != nil { - frameProvingTotal.WithLabelValues("error").Inc() - return nil, errors.Wrap(err, "prove next state") - } - - proverIndex := uint8(0) - for i, prover := range provers { - if bytes.Equal(prover.Address, p.engine.getProverAddress()) { - proverIndex = uint8(i) - break - } - } - // Prove the global frame header newHeader, err := p.engine.frameProver.ProveGlobalFrameHeader( (*prior).Header, diff --git a/node/consensus/global/consensus_types.go b/node/consensus/global/consensus_types.go index 43523ae..833c7f0 100644 --- a/node/consensus/global/consensus_types.go +++ b/node/consensus/global/consensus_types.go @@ -6,6 +6,7 @@ import ( "slices" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" ) // Type aliases for consensus types @@ -21,7 +22,7 @@ func (p GlobalPeerID) Rank() uint64 { return 0 } -func (p GlobalPeerID) Clone() consensus.Unique { +func (p GlobalPeerID) Clone() models.Unique { return GlobalPeerID{ ID: slices.Clone(p.ID), } @@ -48,7 +49,7 @@ func (c GlobalCollectedCommitments) Rank() uint64 { return c.frameNumber } -func (c GlobalCollectedCommitments) Clone() consensus.Unique { +func (c GlobalCollectedCommitments) Clone() models.Unique { return GlobalCollectedCommitments{ frameNumber: c.frameNumber, commitmentHash: slices.Clone(c.commitmentHash), diff --git a/node/dbscan/main.go b/node/dbscan/main.go index cd46aba..afb780b 100644 --- a/node/dbscan/main.go +++ b/node/dbscan/main.go @@ -205,7 +205,11 @@ func main() { var status string switch { case entry.inFirst && entry.inSecond: - status = fmt.Sprintf("present in %s and %s", *configDirectory1, *configDirectory2) + status = fmt.Sprintf( + "present in %s and %s", + *configDirectory1, + *configDirectory2, + ) case entry.inFirst: status = fmt.Sprintf("only present in %s", *configDirectory1) case entry.inSecond: @@ -354,7 +358,11 @@ func decodeKeyBundleValue(sub byte, value []byte) string { if len(value) >= 32 { counterparty := shortHex(value[:32]) signature := shortHex(value[32:]) - return fmt.Sprintf("counterparty=%s\nsignature=%s", counterparty, signature) + return fmt.Sprintf( + "counterparty=%s\nsignature=%s", + counterparty, + signature, + ) } } return shortHex(value) @@ -487,7 +495,10 @@ func decodeHypergraphProto(value []byte) (string, bool) { } hasFields := false - msg.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + msg.ProtoReflect().Range(func( + fd protoreflect.FieldDescriptor, + v protoreflect.Value, + ) bool { hasFields = true return false }) @@ -516,7 +527,11 @@ func decodeHypergraphProto(value []byte) (string, bool) { func summarizeVectorCommitmentTree(value []byte) string { _, err := tries.DeserializeNonLazyTree(value) if err != nil { - return fmt.Sprintf("vector_commitment_tree decode_error=%v raw=%s", err, shortHex(value)) + return fmt.Sprintf( + "vector_commitment_tree decode_error=%v raw=%s", + err, + shortHex(value), + ) } sum := sha256.Sum256(value) @@ -569,7 +584,11 @@ func summarizeHypergraphTreeNode(value []byte) string { jsonBytes, err := json.MarshalIndent(summary, "", " ") if err != nil { - return fmt.Sprintf("tree_leaf key=%s sha256=%s", shortHex(leaf.Key), hashStr) + return fmt.Sprintf( + "tree_leaf key=%s sha256=%s", + shortHex(leaf.Key), + hashStr, + ) } return string(jsonBytes) case tries.TypeBranch: diff --git a/protobufs/canonical_types.go b/protobufs/canonical_types.go index 9c238e8..1463f89 100644 --- a/protobufs/canonical_types.go +++ b/protobufs/canonical_types.go @@ -49,8 +49,8 @@ const ( GlobalFrameHeaderType uint32 = 0x0309 FrameHeaderType uint32 = 0x030A ProverLivenessCheckType uint32 = 0x030B - FrameVoteType uint32 = 0x030C - FrameConfirmationType uint32 = 0x030D + ProposalVoteType uint32 = 0x030C + QuorumCertificateType uint32 = 0x030D GlobalFrameType uint32 = 0x030E AppShardFrameType uint32 = 0x030F SeniorityMergeType uint32 = 0x0310 @@ -60,6 +60,7 @@ const ( PathType uint32 = 0x0314 TraversalSubProofType uint32 = 0x0315 TraversalProofType uint32 = 0x0316 + TimeoutCertificateType uint32 = 0x031D // Hypergraph types (0x0400 - 0x04FF) HypergraphConfigurationType uint32 = 0x0401 diff --git a/protobufs/global.go b/protobufs/global.go index c7cb0d6..4884b83 100644 --- a/protobufs/global.go +++ b/protobufs/global.go @@ -10,50 +10,35 @@ import ( "github.com/pkg/errors" "google.golang.org/protobuf/proto" "source.quilibrium.com/quilibrium/monorepo/consensus" + "source.quilibrium.com/quilibrium/monorepo/consensus/models" ) -func (g *GlobalFrame) Clone() consensus.Unique { - g.Identity() - frame := proto.Clone(g) - return frame.(*GlobalFrame) +func (g *QuorumCertificate) Clone() models.Unique { + return proto.Clone(g).(*QuorumCertificate) } -func (g *GlobalFrame) Identity() consensus.Identity { - return consensus.Identity(g.Header.Output) +func (g *QuorumCertificate) Identity() consensus.Identity { + return consensus.Identity(g.Selector) } -func (g *GlobalFrame) Rank() uint64 { - return g.Header.FrameNumber +func (g *TimeoutCertificate) Clone() models.Unique { + return proto.Clone(g).(*TimeoutCertificate) } -func (a *AppShardFrame) Clone() consensus.Unique { - a.Identity() - frame := proto.Clone(a) - return frame.(*AppShardFrame) +func (g *TimeoutCertificate) Identity() consensus.Identity { + return consensus.Identity( + binary.BigEndian.AppendUint64(slices.Clone(g.Filter), g.Rank), + ) } -func (a *AppShardFrame) Identity() consensus.Identity { - return consensus.Identity(a.Header.Output) +func (f *ProposalVote) Clone() models.Unique { + return proto.Clone(f).(*ProposalVote) } -func (a *AppShardFrame) Rank() uint64 { - return a.Header.FrameNumber -} - -func (f *FrameVote) Clone() consensus.Unique { - f.Identity() - frame := proto.Clone(f) - return frame.(*FrameVote) -} - -func (f *FrameVote) Identity() consensus.Identity { +func (f *ProposalVote) Identity() consensus.Identity { return consensus.Identity(f.PublicKeySignatureBls48581.Signature) } -func (f *FrameVote) Rank() uint64 { - return f.FrameNumber -} - func (s *SeniorityMerge) ToCanonicalBytes() ([]byte, error) { buf := new(bytes.Buffer) @@ -2091,11 +2076,11 @@ func (p *ProverLivenessCheck) FromCanonicalBytes(data []byte) error { return nil } -func (f *FrameVote) ToCanonicalBytes() ([]byte, error) { +func (f *ProposalVote) ToCanonicalBytes() ([]byte, error) { buf := new(bytes.Buffer) // Write type prefix - if err := binary.Write(buf, binary.BigEndian, FrameVoteType); err != nil { + if err := binary.Write(buf, binary.BigEndian, ProposalVoteType); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } @@ -2111,32 +2096,29 @@ func (f *FrameVote) ToCanonicalBytes() ([]byte, error) { return nil, errors.Wrap(err, "to canonical bytes") } + // Write rank + if err := binary.Write(buf, binary.BigEndian, f.Rank); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + // Write frame_number if err := binary.Write(buf, binary.BigEndian, f.FrameNumber); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } - // Write proposer + // Write selector if err := binary.Write( buf, binary.BigEndian, - uint32(len(f.Proposer)), + uint32(len(f.Selector)), ); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } - if _, err := buf.Write(f.Proposer); err != nil { - return nil, errors.Wrap(err, "to canonical bytes") - } - - // Write approve - approve := uint8(0) - if f.Approve { - approve = 1 - } - if err := binary.Write(buf, binary.BigEndian, approve); err != nil { + if _, err := buf.Write(f.Selector); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } + // Write timestamp if err := binary.Write(buf, binary.BigEndian, f.Timestamp); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } @@ -2166,7 +2148,7 @@ func (f *FrameVote) ToCanonicalBytes() ([]byte, error) { return buf.Bytes(), nil } -func (f *FrameVote) FromCanonicalBytes(data []byte) error { +func (f *ProposalVote) FromCanonicalBytes(data []byte) error { buf := bytes.NewBuffer(data) // Read and verify type prefix @@ -2174,7 +2156,7 @@ func (f *FrameVote) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { return errors.Wrap(err, "from canonical bytes") } - if typePrefix != FrameVoteType { + if typePrefix != ProposalVoteType { return errors.Wrap( errors.New("invalid type prefix"), "from canonical bytes", @@ -2191,28 +2173,26 @@ func (f *FrameVote) FromCanonicalBytes(data []byte) error { return errors.Wrap(err, "from canonical bytes") } + // Read rank + if err := binary.Read(buf, binary.BigEndian, &f.Rank); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + // Read frame_number if err := binary.Read(buf, binary.BigEndian, &f.FrameNumber); err != nil { return errors.Wrap(err, "from canonical bytes") } - // Read proposer - var proposerLen uint32 - if err := binary.Read(buf, binary.BigEndian, &proposerLen); err != nil { + // Read selector + var selectorLen uint32 + if err := binary.Read(buf, binary.BigEndian, &selectorLen); err != nil { return errors.Wrap(err, "from canonical bytes") } - f.Proposer = make([]byte, proposerLen) - if _, err := buf.Read(f.Proposer); err != nil { + f.Selector = make([]byte, selectorLen) + if _, err := buf.Read(f.Selector); err != nil { return errors.Wrap(err, "from canonical bytes") } - // Read approve - var approve uint8 - if err := binary.Read(buf, binary.BigEndian, &approve); err != nil { - return errors.Wrap(err, "from canonical bytes") - } - f.Approve = approve != 0 - // Read timestamp if err := binary.Read(buf, binary.BigEndian, &f.Timestamp); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -2239,14 +2219,14 @@ func (f *FrameVote) FromCanonicalBytes(data []byte) error { return nil } -func (f *FrameConfirmation) ToCanonicalBytes() ([]byte, error) { +func (f *QuorumCertificate) ToCanonicalBytes() ([]byte, error) { buf := new(bytes.Buffer) // Write type prefix if err := binary.Write( buf, binary.BigEndian, - FrameConfirmationType, + QuorumCertificateType, ); err != nil { return nil, errors.Wrap(err, "to canonical bytes") } @@ -2263,6 +2243,11 @@ func (f *FrameConfirmation) ToCanonicalBytes() ([]byte, error) { return nil, errors.Wrap(err, "to canonical bytes") } + // Write rank + if err := binary.Write(buf, binary.BigEndian, f.Rank); err != nil { + return nil, errors.Wrap(err, "to canonical bytes") + } + // Write frame_number if err := binary.Write(buf, binary.BigEndian, f.FrameNumber); err != nil { return nil, errors.Wrap(err, "to canonical bytes") @@ -2309,7 +2294,7 @@ func (f *FrameConfirmation) ToCanonicalBytes() ([]byte, error) { return buf.Bytes(), nil } -func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { +func (f *QuorumCertificate) FromCanonicalBytes(data []byte) error { buf := bytes.NewBuffer(data) // Read and verify type prefix @@ -2317,7 +2302,7 @@ func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil { return errors.Wrap(err, "from canonical bytes") } - if typePrefix != FrameConfirmationType { + if typePrefix != QuorumCertificateType { return errors.Wrap( errors.New("invalid type prefix"), "from canonical bytes", @@ -2334,6 +2319,11 @@ func (f *FrameConfirmation) FromCanonicalBytes(data []byte) error { return errors.Wrap(err, "from canonical bytes") } + // Read rank + if err := binary.Read(buf, binary.BigEndian, &f.Rank); err != nil { + return errors.Wrap(err, "from canonical bytes") + } + // Read frame_number if err := binary.Read(buf, binary.BigEndian, &f.FrameNumber); err != nil { return errors.Wrap(err, "from canonical bytes") @@ -3881,22 +3871,20 @@ func (p *ProverLivenessCheck) GetSignatureDomain() []byte { return slices.Concat([]byte("PROVER_LIVENESS"), p.Filter) } -var _ ValidatableMessage = (*FrameVote)(nil) +var _ ValidatableMessage = (*ProposalVote)(nil) -func (f *FrameVote) Validate() error { +func (f *ProposalVote) Validate() error { if f == nil { return errors.Wrap(errors.New("nil frame vote"), "validate") } - // Frame number is uint64, any value is valid + // Rank and frame number is uint64, any value is valid - // Proposer should be 32 bytes - if len(f.Proposer) != 32 { - return errors.Wrap(errors.New("invalid proposer length"), "validate") + // Selector should be 32 bytes + if len(f.Selector) != 32 { + return errors.Wrap(errors.New("invalid selector length"), "validate") } - // Approve is bool, any value is valid - // Signature must be present if f.PublicKeySignatureBls48581 == nil { return errors.Wrap(errors.New("missing signature"), "validate") @@ -3910,14 +3898,14 @@ func (f *FrameVote) Validate() error { return nil } -var _ ValidatableMessage = (*FrameConfirmation)(nil) +var _ ValidatableMessage = (*QuorumCertificate)(nil) -func (f *FrameConfirmation) Validate() error { +func (f *QuorumCertificate) Validate() error { if f == nil { return errors.Wrap(errors.New("nil frame confirmation"), "validate") } - // Frame number is uint64, any value is valid + // Rank and frame number is uint64, any value is valid // Selector should be 32 bytes if len(f.Selector) != 32 { diff --git a/protobufs/global.pb.go b/protobufs/global.pb.go index 15dc28c..a75e2fe 100644 --- a/protobufs/global.pb.go +++ b/protobufs/global.pb.go @@ -1441,14 +1441,16 @@ type ProverLivenessCheck struct { // The filter for the prover's commitment in the trie Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` // The frame number for which this liveness check is being sent - FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + FrameNumber uint64 `protobuf:"varint,3,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` // The timestamp when the liveness check was created - Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The hash of the shard commitments and prover root - CommitmentHash []byte `protobuf:"bytes,4,opt,name=commitment_hash,json=commitmentHash,proto3" json:"commitment_hash,omitempty"` + CommitmentHash []byte `protobuf:"bytes,5,opt,name=commitment_hash,json=commitmentHash,proto3" json:"commitment_hash,omitempty"` // The BLS signature with the prover's address - PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,5,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` + PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,6,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` } func (x *ProverLivenessCheck) Reset() { @@ -1490,6 +1492,13 @@ func (x *ProverLivenessCheck) GetFilter() []byte { return nil } +func (x *ProverLivenessCheck) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + func (x *ProverLivenessCheck) GetFrameNumber() uint64 { if x != nil { return x.FrameNumber @@ -1518,27 +1527,27 @@ func (x *ProverLivenessCheck) GetPublicKeySignatureBls48581() *BLS48581Addressed return nil } -type FrameVote struct { +type ProposalVote struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The filter for the prover's commitment in the trie Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - // The frame number being voted on - FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` - // The proposer of the frame - Proposer []byte `protobuf:"bytes,3,opt,name=proposer,proto3" json:"proposer,omitempty"` - // Whether the voter approves the frame - Approve bool `protobuf:"varint,4,opt,name=approve,proto3" json:"approve,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` + // The frame number for which this proposal applies + FrameNumber uint64 `protobuf:"varint,3,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + // The selector being voted for + Selector []byte `protobuf:"bytes,4,opt,name=selector,proto3" json:"selector,omitempty"` // The timestamp when the vote was created Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The BLS signature with the voter's address PublicKeySignatureBls48581 *BLS48581AddressedSignature `protobuf:"bytes,6,opt,name=public_key_signature_bls48581,json=publicKeySignatureBls48581,proto3" json:"public_key_signature_bls48581,omitempty"` } -func (x *FrameVote) Reset() { - *x = FrameVote{} +func (x *ProposalVote) Reset() { + *x = ProposalVote{} if protoimpl.UnsafeEnabled { mi := &file_global_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1546,13 +1555,13 @@ func (x *FrameVote) Reset() { } } -func (x *FrameVote) String() string { +func (x *ProposalVote) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FrameVote) ProtoMessage() {} +func (*ProposalVote) ProtoMessage() {} -func (x *FrameVote) ProtoReflect() protoreflect.Message { +func (x *ProposalVote) ProtoReflect() protoreflect.Message { mi := &file_global_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1564,72 +1573,74 @@ func (x *FrameVote) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FrameVote.ProtoReflect.Descriptor instead. -func (*FrameVote) Descriptor() ([]byte, []int) { +// Deprecated: Use ProposalVote.ProtoReflect.Descriptor instead. +func (*ProposalVote) Descriptor() ([]byte, []int) { return file_global_proto_rawDescGZIP(), []int{15} } -func (x *FrameVote) GetFilter() []byte { +func (x *ProposalVote) GetFilter() []byte { if x != nil { return x.Filter } return nil } -func (x *FrameVote) GetFrameNumber() uint64 { +func (x *ProposalVote) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *ProposalVote) GetFrameNumber() uint64 { if x != nil { return x.FrameNumber } return 0 } -func (x *FrameVote) GetProposer() []byte { +func (x *ProposalVote) GetSelector() []byte { if x != nil { - return x.Proposer + return x.Selector } return nil } -func (x *FrameVote) GetApprove() bool { - if x != nil { - return x.Approve - } - return false -} - -func (x *FrameVote) GetTimestamp() int64 { +func (x *ProposalVote) GetTimestamp() int64 { if x != nil { return x.Timestamp } return 0 } -func (x *FrameVote) GetPublicKeySignatureBls48581() *BLS48581AddressedSignature { +func (x *ProposalVote) GetPublicKeySignatureBls48581() *BLS48581AddressedSignature { if x != nil { return x.PublicKeySignatureBls48581 } return nil } -type FrameConfirmation struct { +type QuorumCertificate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The filter for the prover's commitment in the trie Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - // The frame number that was confirmed - FrameNumber uint64 `protobuf:"varint,2,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` + // The frame number for which this certificate applies + FrameNumber uint64 `protobuf:"varint,3,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"` // The selector (hash) of the confirmed frame - Selector []byte `protobuf:"bytes,3,opt,name=selector,proto3" json:"selector,omitempty"` + Selector []byte `protobuf:"bytes,4,opt,name=selector,proto3" json:"selector,omitempty"` // The timestamp when the vote was created - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The aggregated BLS signature from all voters - AggregateSignature *BLS48581AggregateSignature `protobuf:"bytes,5,opt,name=aggregate_signature,json=aggregateSignature,proto3" json:"aggregate_signature,omitempty"` + AggregateSignature *BLS48581AggregateSignature `protobuf:"bytes,6,opt,name=aggregate_signature,json=aggregateSignature,proto3" json:"aggregate_signature,omitempty"` } -func (x *FrameConfirmation) Reset() { - *x = FrameConfirmation{} +func (x *QuorumCertificate) Reset() { + *x = QuorumCertificate{} if protoimpl.UnsafeEnabled { mi := &file_global_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1637,13 +1648,13 @@ func (x *FrameConfirmation) Reset() { } } -func (x *FrameConfirmation) String() string { +func (x *QuorumCertificate) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FrameConfirmation) ProtoMessage() {} +func (*QuorumCertificate) ProtoMessage() {} -func (x *FrameConfirmation) ProtoReflect() protoreflect.Message { +func (x *QuorumCertificate) ProtoReflect() protoreflect.Message { mi := &file_global_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1655,40 +1666,131 @@ func (x *FrameConfirmation) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FrameConfirmation.ProtoReflect.Descriptor instead. -func (*FrameConfirmation) Descriptor() ([]byte, []int) { +// Deprecated: Use QuorumCertificate.ProtoReflect.Descriptor instead. +func (*QuorumCertificate) Descriptor() ([]byte, []int) { return file_global_proto_rawDescGZIP(), []int{16} } -func (x *FrameConfirmation) GetFilter() []byte { +func (x *QuorumCertificate) GetFilter() []byte { if x != nil { return x.Filter } return nil } -func (x *FrameConfirmation) GetFrameNumber() uint64 { +func (x *QuorumCertificate) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *QuorumCertificate) GetFrameNumber() uint64 { if x != nil { return x.FrameNumber } return 0 } -func (x *FrameConfirmation) GetSelector() []byte { +func (x *QuorumCertificate) GetSelector() []byte { if x != nil { return x.Selector } return nil } -func (x *FrameConfirmation) GetTimestamp() int64 { +func (x *QuorumCertificate) GetTimestamp() int64 { if x != nil { return x.Timestamp } return 0 } -func (x *FrameConfirmation) GetAggregateSignature() *BLS48581AggregateSignature { +func (x *QuorumCertificate) GetAggregateSignature() *BLS48581AggregateSignature { + if x != nil { + return x.AggregateSignature + } + return nil +} + +type TimeoutCertificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter for the prover's commitment in the trie + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The rank of the consensus clique + Rank uint64 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"` + // The latest ranks in signer order + LatestRanks []uint64 `protobuf:"varint,3,rep,packed,name=latest_ranks,json=latestRanks,proto3" json:"latest_ranks,omitempty"` + // The latest quorum certificate from all timeouts + LatestQuorumCertificate *QuorumCertificate `protobuf:"bytes,4,opt,name=latest_quorum_certificate,json=latestQuorumCertificate,proto3" json:"latest_quorum_certificate,omitempty"` + // The aggregated BLS signature from all voters + AggregateSignature *BLS48581AggregateSignature `protobuf:"bytes,5,opt,name=aggregate_signature,json=aggregateSignature,proto3" json:"aggregate_signature,omitempty"` +} + +func (x *TimeoutCertificate) Reset() { + *x = TimeoutCertificate{} + if protoimpl.UnsafeEnabled { + mi := &file_global_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeoutCertificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeoutCertificate) ProtoMessage() {} + +func (x *TimeoutCertificate) ProtoReflect() protoreflect.Message { + mi := &file_global_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeoutCertificate.ProtoReflect.Descriptor instead. +func (*TimeoutCertificate) Descriptor() ([]byte, []int) { + return file_global_proto_rawDescGZIP(), []int{17} +} + +func (x *TimeoutCertificate) GetFilter() []byte { + if x != nil { + return x.Filter + } + return nil +} + +func (x *TimeoutCertificate) GetRank() uint64 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *TimeoutCertificate) GetLatestRanks() []uint64 { + if x != nil { + return x.LatestRanks + } + return nil +} + +func (x *TimeoutCertificate) GetLatestQuorumCertificate() *QuorumCertificate { + if x != nil { + return x.LatestQuorumCertificate + } + return nil +} + +func (x *TimeoutCertificate) GetAggregateSignature() *BLS48581AggregateSignature { if x != nil { return x.AggregateSignature } @@ -1707,7 +1809,7 @@ type GlobalFrame struct { func (x *GlobalFrame) Reset() { *x = GlobalFrame{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[17] + mi := &file_global_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1720,7 +1822,7 @@ func (x *GlobalFrame) String() string { func (*GlobalFrame) ProtoMessage() {} func (x *GlobalFrame) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[17] + mi := &file_global_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1733,7 +1835,7 @@ func (x *GlobalFrame) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalFrame.ProtoReflect.Descriptor instead. func (*GlobalFrame) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{17} + return file_global_proto_rawDescGZIP(), []int{18} } func (x *GlobalFrame) GetHeader() *GlobalFrameHeader { @@ -1762,7 +1864,7 @@ type AppShardFrame struct { func (x *AppShardFrame) Reset() { *x = AppShardFrame{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[18] + mi := &file_global_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1775,7 +1877,7 @@ func (x *AppShardFrame) String() string { func (*AppShardFrame) ProtoMessage() {} func (x *AppShardFrame) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[18] + mi := &file_global_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1788,7 +1890,7 @@ func (x *AppShardFrame) ProtoReflect() protoreflect.Message { // Deprecated: Use AppShardFrame.ProtoReflect.Descriptor instead. func (*AppShardFrame) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{18} + return file_global_proto_rawDescGZIP(), []int{19} } func (x *AppShardFrame) GetHeader() *FrameHeader { @@ -1817,7 +1919,7 @@ type GlobalAlert struct { func (x *GlobalAlert) Reset() { *x = GlobalAlert{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[19] + mi := &file_global_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1830,7 +1932,7 @@ func (x *GlobalAlert) String() string { func (*GlobalAlert) ProtoMessage() {} func (x *GlobalAlert) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[19] + mi := &file_global_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1843,7 +1945,7 @@ func (x *GlobalAlert) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalAlert.ProtoReflect.Descriptor instead. func (*GlobalAlert) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{19} + return file_global_proto_rawDescGZIP(), []int{20} } func (x *GlobalAlert) GetMessage() string { @@ -1871,7 +1973,7 @@ type GetGlobalFrameRequest struct { func (x *GetGlobalFrameRequest) Reset() { *x = GetGlobalFrameRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[20] + mi := &file_global_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1884,7 +1986,7 @@ func (x *GetGlobalFrameRequest) String() string { func (*GetGlobalFrameRequest) ProtoMessage() {} func (x *GetGlobalFrameRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[20] + mi := &file_global_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1897,7 +1999,7 @@ func (x *GetGlobalFrameRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetGlobalFrameRequest.ProtoReflect.Descriptor instead. func (*GetGlobalFrameRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{20} + return file_global_proto_rawDescGZIP(), []int{21} } func (x *GetGlobalFrameRequest) GetFrameNumber() uint64 { @@ -1919,7 +2021,7 @@ type GlobalFrameResponse struct { func (x *GlobalFrameResponse) Reset() { *x = GlobalFrameResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[21] + mi := &file_global_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1932,7 +2034,7 @@ func (x *GlobalFrameResponse) String() string { func (*GlobalFrameResponse) ProtoMessage() {} func (x *GlobalFrameResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[21] + mi := &file_global_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1945,7 +2047,7 @@ func (x *GlobalFrameResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalFrameResponse.ProtoReflect.Descriptor instead. func (*GlobalFrameResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{21} + return file_global_proto_rawDescGZIP(), []int{22} } func (x *GlobalFrameResponse) GetFrame() *GlobalFrame { @@ -1974,7 +2076,7 @@ type GetAppShardFrameRequest struct { func (x *GetAppShardFrameRequest) Reset() { *x = GetAppShardFrameRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[22] + mi := &file_global_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1987,7 +2089,7 @@ func (x *GetAppShardFrameRequest) String() string { func (*GetAppShardFrameRequest) ProtoMessage() {} func (x *GetAppShardFrameRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[22] + mi := &file_global_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2000,7 +2102,7 @@ func (x *GetAppShardFrameRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAppShardFrameRequest.ProtoReflect.Descriptor instead. func (*GetAppShardFrameRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{22} + return file_global_proto_rawDescGZIP(), []int{23} } func (x *GetAppShardFrameRequest) GetFilter() []byte { @@ -2029,7 +2131,7 @@ type AppShardFrameResponse struct { func (x *AppShardFrameResponse) Reset() { *x = AppShardFrameResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[23] + mi := &file_global_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2042,7 +2144,7 @@ func (x *AppShardFrameResponse) String() string { func (*AppShardFrameResponse) ProtoMessage() {} func (x *AppShardFrameResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[23] + mi := &file_global_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2055,7 +2157,7 @@ func (x *AppShardFrameResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AppShardFrameResponse.ProtoReflect.Descriptor instead. func (*AppShardFrameResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{23} + return file_global_proto_rawDescGZIP(), []int{24} } func (x *AppShardFrameResponse) GetFrame() *AppShardFrame { @@ -2084,7 +2186,7 @@ type GetAppShardsRequest struct { func (x *GetAppShardsRequest) Reset() { *x = GetAppShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[24] + mi := &file_global_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2097,7 +2199,7 @@ func (x *GetAppShardsRequest) String() string { func (*GetAppShardsRequest) ProtoMessage() {} func (x *GetAppShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[24] + mi := &file_global_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2110,7 +2212,7 @@ func (x *GetAppShardsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAppShardsRequest.ProtoReflect.Descriptor instead. func (*GetAppShardsRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{24} + return file_global_proto_rawDescGZIP(), []int{25} } func (x *GetAppShardsRequest) GetShardKey() []byte { @@ -2142,7 +2244,7 @@ type AppShardInfo struct { func (x *AppShardInfo) Reset() { *x = AppShardInfo{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[25] + mi := &file_global_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2155,7 +2257,7 @@ func (x *AppShardInfo) String() string { func (*AppShardInfo) ProtoMessage() {} func (x *AppShardInfo) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[25] + mi := &file_global_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2168,7 +2270,7 @@ func (x *AppShardInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use AppShardInfo.ProtoReflect.Descriptor instead. func (*AppShardInfo) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{25} + return file_global_proto_rawDescGZIP(), []int{26} } func (x *AppShardInfo) GetPrefix() []uint32 { @@ -2217,7 +2319,7 @@ type GetAppShardsResponse struct { func (x *GetAppShardsResponse) Reset() { *x = GetAppShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[26] + mi := &file_global_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2230,7 +2332,7 @@ func (x *GetAppShardsResponse) String() string { func (*GetAppShardsResponse) ProtoMessage() {} func (x *GetAppShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[26] + mi := &file_global_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2243,7 +2345,7 @@ func (x *GetAppShardsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAppShardsResponse.ProtoReflect.Descriptor instead. func (*GetAppShardsResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{26} + return file_global_proto_rawDescGZIP(), []int{27} } func (x *GetAppShardsResponse) GetInfo() []*AppShardInfo { @@ -2265,7 +2367,7 @@ type GetGlobalShardsRequest struct { func (x *GetGlobalShardsRequest) Reset() { *x = GetGlobalShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[27] + mi := &file_global_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2278,7 +2380,7 @@ func (x *GetGlobalShardsRequest) String() string { func (*GetGlobalShardsRequest) ProtoMessage() {} func (x *GetGlobalShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[27] + mi := &file_global_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2291,7 +2393,7 @@ func (x *GetGlobalShardsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetGlobalShardsRequest.ProtoReflect.Descriptor instead. func (*GetGlobalShardsRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{27} + return file_global_proto_rawDescGZIP(), []int{28} } func (x *GetGlobalShardsRequest) GetL1() []byte { @@ -2320,7 +2422,7 @@ type GetGlobalShardsResponse struct { func (x *GetGlobalShardsResponse) Reset() { *x = GetGlobalShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[28] + mi := &file_global_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2333,7 +2435,7 @@ func (x *GetGlobalShardsResponse) String() string { func (*GetGlobalShardsResponse) ProtoMessage() {} func (x *GetGlobalShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[28] + mi := &file_global_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2346,7 +2448,7 @@ func (x *GetGlobalShardsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetGlobalShardsResponse.ProtoReflect.Descriptor instead. func (*GetGlobalShardsResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{28} + return file_global_proto_rawDescGZIP(), []int{29} } func (x *GetGlobalShardsResponse) GetSize() []byte { @@ -2378,7 +2480,7 @@ type GetLockedAddressesRequest struct { func (x *GetLockedAddressesRequest) Reset() { *x = GetLockedAddressesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[29] + mi := &file_global_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2391,7 +2493,7 @@ func (x *GetLockedAddressesRequest) String() string { func (*GetLockedAddressesRequest) ProtoMessage() {} func (x *GetLockedAddressesRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[29] + mi := &file_global_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2404,7 +2506,7 @@ func (x *GetLockedAddressesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLockedAddressesRequest.ProtoReflect.Descriptor instead. func (*GetLockedAddressesRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{29} + return file_global_proto_rawDescGZIP(), []int{30} } func (x *GetLockedAddressesRequest) GetShardAddress() []byte { @@ -2439,7 +2541,7 @@ type LockedTransaction struct { func (x *LockedTransaction) Reset() { *x = LockedTransaction{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[30] + mi := &file_global_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2452,7 +2554,7 @@ func (x *LockedTransaction) String() string { func (*LockedTransaction) ProtoMessage() {} func (x *LockedTransaction) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[30] + mi := &file_global_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2465,7 +2567,7 @@ func (x *LockedTransaction) ProtoReflect() protoreflect.Message { // Deprecated: Use LockedTransaction.ProtoReflect.Descriptor instead. func (*LockedTransaction) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{30} + return file_global_proto_rawDescGZIP(), []int{31} } func (x *LockedTransaction) GetTransactionHash() []byte { @@ -2507,7 +2609,7 @@ type GetLockedAddressesResponse struct { func (x *GetLockedAddressesResponse) Reset() { *x = GetLockedAddressesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[31] + mi := &file_global_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2520,7 +2622,7 @@ func (x *GetLockedAddressesResponse) String() string { func (*GetLockedAddressesResponse) ProtoMessage() {} func (x *GetLockedAddressesResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[31] + mi := &file_global_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2533,7 +2635,7 @@ func (x *GetLockedAddressesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLockedAddressesResponse.ProtoReflect.Descriptor instead. func (*GetLockedAddressesResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{31} + return file_global_proto_rawDescGZIP(), []int{32} } func (x *GetLockedAddressesResponse) GetTransactions() []*LockedTransaction { @@ -2552,7 +2654,7 @@ type GlobalGetWorkerInfoRequest struct { func (x *GlobalGetWorkerInfoRequest) Reset() { *x = GlobalGetWorkerInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[32] + mi := &file_global_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2565,7 +2667,7 @@ func (x *GlobalGetWorkerInfoRequest) String() string { func (*GlobalGetWorkerInfoRequest) ProtoMessage() {} func (x *GlobalGetWorkerInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[32] + mi := &file_global_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2578,7 +2680,7 @@ func (x *GlobalGetWorkerInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalGetWorkerInfoRequest.ProtoReflect.Descriptor instead. func (*GlobalGetWorkerInfoRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{32} + return file_global_proto_rawDescGZIP(), []int{33} } type GlobalGetWorkerInfoResponseItem struct { @@ -2597,7 +2699,7 @@ type GlobalGetWorkerInfoResponseItem struct { func (x *GlobalGetWorkerInfoResponseItem) Reset() { *x = GlobalGetWorkerInfoResponseItem{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[33] + mi := &file_global_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2610,7 +2712,7 @@ func (x *GlobalGetWorkerInfoResponseItem) String() string { func (*GlobalGetWorkerInfoResponseItem) ProtoMessage() {} func (x *GlobalGetWorkerInfoResponseItem) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[33] + mi := &file_global_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2623,7 +2725,7 @@ func (x *GlobalGetWorkerInfoResponseItem) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalGetWorkerInfoResponseItem.ProtoReflect.Descriptor instead. func (*GlobalGetWorkerInfoResponseItem) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{33} + return file_global_proto_rawDescGZIP(), []int{34} } func (x *GlobalGetWorkerInfoResponseItem) GetCoreId() uint32 { @@ -2679,7 +2781,7 @@ type GlobalGetWorkerInfoResponse struct { func (x *GlobalGetWorkerInfoResponse) Reset() { *x = GlobalGetWorkerInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[34] + mi := &file_global_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2692,7 +2794,7 @@ func (x *GlobalGetWorkerInfoResponse) String() string { func (*GlobalGetWorkerInfoResponse) ProtoMessage() {} func (x *GlobalGetWorkerInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[34] + mi := &file_global_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2705,7 +2807,7 @@ func (x *GlobalGetWorkerInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GlobalGetWorkerInfoResponse.ProtoReflect.Descriptor instead. func (*GlobalGetWorkerInfoResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{34} + return file_global_proto_rawDescGZIP(), []int{35} } func (x *GlobalGetWorkerInfoResponse) GetWorkers() []*GlobalGetWorkerInfoResponseItem { @@ -2728,7 +2830,7 @@ type SendMessage struct { func (x *SendMessage) Reset() { *x = SendMessage{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[35] + mi := &file_global_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2741,7 +2843,7 @@ func (x *SendMessage) String() string { func (*SendMessage) ProtoMessage() {} func (x *SendMessage) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[35] + mi := &file_global_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2754,7 +2856,7 @@ func (x *SendMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use SendMessage.ProtoReflect.Descriptor instead. func (*SendMessage) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{35} + return file_global_proto_rawDescGZIP(), []int{36} } func (x *SendMessage) GetPeerId() []byte { @@ -2791,7 +2893,7 @@ type ReceiveMessage struct { func (x *ReceiveMessage) Reset() { *x = ReceiveMessage{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[36] + mi := &file_global_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2804,7 +2906,7 @@ func (x *ReceiveMessage) String() string { func (*ReceiveMessage) ProtoMessage() {} func (x *ReceiveMessage) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[36] + mi := &file_global_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2817,7 +2919,7 @@ func (x *ReceiveMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceiveMessage.ProtoReflect.Descriptor instead. func (*ReceiveMessage) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{36} + return file_global_proto_rawDescGZIP(), []int{37} } func (x *ReceiveMessage) GetSourcePeerId() []byte { @@ -2852,7 +2954,7 @@ type GetKeyRegistryRequest struct { func (x *GetKeyRegistryRequest) Reset() { *x = GetKeyRegistryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[37] + mi := &file_global_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2865,7 +2967,7 @@ func (x *GetKeyRegistryRequest) String() string { func (*GetKeyRegistryRequest) ProtoMessage() {} func (x *GetKeyRegistryRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[37] + mi := &file_global_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2878,7 +2980,7 @@ func (x *GetKeyRegistryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryRequest.ProtoReflect.Descriptor instead. func (*GetKeyRegistryRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{37} + return file_global_proto_rawDescGZIP(), []int{38} } func (x *GetKeyRegistryRequest) GetIdentityKeyAddress() []byte { @@ -2900,7 +3002,7 @@ type GetKeyRegistryResponse struct { func (x *GetKeyRegistryResponse) Reset() { *x = GetKeyRegistryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[38] + mi := &file_global_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2913,7 +3015,7 @@ func (x *GetKeyRegistryResponse) String() string { func (*GetKeyRegistryResponse) ProtoMessage() {} func (x *GetKeyRegistryResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[38] + mi := &file_global_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2926,7 +3028,7 @@ func (x *GetKeyRegistryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryResponse.ProtoReflect.Descriptor instead. func (*GetKeyRegistryResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{38} + return file_global_proto_rawDescGZIP(), []int{39} } func (x *GetKeyRegistryResponse) GetRegistry() *KeyRegistry { @@ -2954,7 +3056,7 @@ type GetKeyRegistryByProverRequest struct { func (x *GetKeyRegistryByProverRequest) Reset() { *x = GetKeyRegistryByProverRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[39] + mi := &file_global_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2967,7 +3069,7 @@ func (x *GetKeyRegistryByProverRequest) String() string { func (*GetKeyRegistryByProverRequest) ProtoMessage() {} func (x *GetKeyRegistryByProverRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[39] + mi := &file_global_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2980,7 +3082,7 @@ func (x *GetKeyRegistryByProverRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryByProverRequest.ProtoReflect.Descriptor instead. func (*GetKeyRegistryByProverRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{39} + return file_global_proto_rawDescGZIP(), []int{40} } func (x *GetKeyRegistryByProverRequest) GetProverKeyAddress() []byte { @@ -3002,7 +3104,7 @@ type GetKeyRegistryByProverResponse struct { func (x *GetKeyRegistryByProverResponse) Reset() { *x = GetKeyRegistryByProverResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[40] + mi := &file_global_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3015,7 +3117,7 @@ func (x *GetKeyRegistryByProverResponse) String() string { func (*GetKeyRegistryByProverResponse) ProtoMessage() {} func (x *GetKeyRegistryByProverResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[40] + mi := &file_global_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3028,7 +3130,7 @@ func (x *GetKeyRegistryByProverResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyRegistryByProverResponse.ProtoReflect.Descriptor instead. func (*GetKeyRegistryByProverResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{40} + return file_global_proto_rawDescGZIP(), []int{41} } func (x *GetKeyRegistryByProverResponse) GetRegistry() *KeyRegistry { @@ -3057,7 +3159,7 @@ type PutIdentityKeyRequest struct { func (x *PutIdentityKeyRequest) Reset() { *x = PutIdentityKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[41] + mi := &file_global_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3070,7 +3172,7 @@ func (x *PutIdentityKeyRequest) String() string { func (*PutIdentityKeyRequest) ProtoMessage() {} func (x *PutIdentityKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[41] + mi := &file_global_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3083,7 +3185,7 @@ func (x *PutIdentityKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutIdentityKeyRequest.ProtoReflect.Descriptor instead. func (*PutIdentityKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{41} + return file_global_proto_rawDescGZIP(), []int{42} } func (x *PutIdentityKeyRequest) GetAddress() []byte { @@ -3111,7 +3213,7 @@ type PutIdentityKeyResponse struct { func (x *PutIdentityKeyResponse) Reset() { *x = PutIdentityKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[42] + mi := &file_global_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3124,7 +3226,7 @@ func (x *PutIdentityKeyResponse) String() string { func (*PutIdentityKeyResponse) ProtoMessage() {} func (x *PutIdentityKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[42] + mi := &file_global_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3137,7 +3239,7 @@ func (x *PutIdentityKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutIdentityKeyResponse.ProtoReflect.Descriptor instead. func (*PutIdentityKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{42} + return file_global_proto_rawDescGZIP(), []int{43} } func (x *PutIdentityKeyResponse) GetError() string { @@ -3158,7 +3260,7 @@ type PutProvingKeyRequest struct { func (x *PutProvingKeyRequest) Reset() { *x = PutProvingKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[43] + mi := &file_global_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3171,7 +3273,7 @@ func (x *PutProvingKeyRequest) String() string { func (*PutProvingKeyRequest) ProtoMessage() {} func (x *PutProvingKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[43] + mi := &file_global_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3184,7 +3286,7 @@ func (x *PutProvingKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutProvingKeyRequest.ProtoReflect.Descriptor instead. func (*PutProvingKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{43} + return file_global_proto_rawDescGZIP(), []int{44} } func (x *PutProvingKeyRequest) GetProvingKey() *BLS48581SignatureWithProofOfPossession { @@ -3205,7 +3307,7 @@ type PutProvingKeyResponse struct { func (x *PutProvingKeyResponse) Reset() { *x = PutProvingKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[44] + mi := &file_global_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3218,7 +3320,7 @@ func (x *PutProvingKeyResponse) String() string { func (*PutProvingKeyResponse) ProtoMessage() {} func (x *PutProvingKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[44] + mi := &file_global_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3231,7 +3333,7 @@ func (x *PutProvingKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutProvingKeyResponse.ProtoReflect.Descriptor instead. func (*PutProvingKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{44} + return file_global_proto_rawDescGZIP(), []int{45} } func (x *PutProvingKeyResponse) GetError() string { @@ -3255,7 +3357,7 @@ type PutCrossSignatureRequest struct { func (x *PutCrossSignatureRequest) Reset() { *x = PutCrossSignatureRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[45] + mi := &file_global_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3268,7 +3370,7 @@ func (x *PutCrossSignatureRequest) String() string { func (*PutCrossSignatureRequest) ProtoMessage() {} func (x *PutCrossSignatureRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[45] + mi := &file_global_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3281,7 +3383,7 @@ func (x *PutCrossSignatureRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutCrossSignatureRequest.ProtoReflect.Descriptor instead. func (*PutCrossSignatureRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{45} + return file_global_proto_rawDescGZIP(), []int{46} } func (x *PutCrossSignatureRequest) GetIdentityKeyAddress() []byte { @@ -3323,7 +3425,7 @@ type PutCrossSignatureResponse struct { func (x *PutCrossSignatureResponse) Reset() { *x = PutCrossSignatureResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[46] + mi := &file_global_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3336,7 +3438,7 @@ func (x *PutCrossSignatureResponse) String() string { func (*PutCrossSignatureResponse) ProtoMessage() {} func (x *PutCrossSignatureResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[46] + mi := &file_global_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3349,7 +3451,7 @@ func (x *PutCrossSignatureResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutCrossSignatureResponse.ProtoReflect.Descriptor instead. func (*PutCrossSignatureResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{46} + return file_global_proto_rawDescGZIP(), []int{47} } func (x *PutCrossSignatureResponse) GetError() string { @@ -3371,7 +3473,7 @@ type PutSignedKeyRequest struct { func (x *PutSignedKeyRequest) Reset() { *x = PutSignedKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[47] + mi := &file_global_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3384,7 +3486,7 @@ func (x *PutSignedKeyRequest) String() string { func (*PutSignedKeyRequest) ProtoMessage() {} func (x *PutSignedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[47] + mi := &file_global_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3397,7 +3499,7 @@ func (x *PutSignedKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutSignedKeyRequest.ProtoReflect.Descriptor instead. func (*PutSignedKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{47} + return file_global_proto_rawDescGZIP(), []int{48} } func (x *PutSignedKeyRequest) GetAddress() []byte { @@ -3425,7 +3527,7 @@ type PutSignedKeyResponse struct { func (x *PutSignedKeyResponse) Reset() { *x = PutSignedKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[48] + mi := &file_global_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3438,7 +3540,7 @@ func (x *PutSignedKeyResponse) String() string { func (*PutSignedKeyResponse) ProtoMessage() {} func (x *PutSignedKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[48] + mi := &file_global_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3451,7 +3553,7 @@ func (x *PutSignedKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutSignedKeyResponse.ProtoReflect.Descriptor instead. func (*PutSignedKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{48} + return file_global_proto_rawDescGZIP(), []int{49} } func (x *PutSignedKeyResponse) GetError() string { @@ -3472,7 +3574,7 @@ type GetIdentityKeyRequest struct { func (x *GetIdentityKeyRequest) Reset() { *x = GetIdentityKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[49] + mi := &file_global_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3485,7 +3587,7 @@ func (x *GetIdentityKeyRequest) String() string { func (*GetIdentityKeyRequest) ProtoMessage() {} func (x *GetIdentityKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[49] + mi := &file_global_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3498,7 +3600,7 @@ func (x *GetIdentityKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetIdentityKeyRequest.ProtoReflect.Descriptor instead. func (*GetIdentityKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{49} + return file_global_proto_rawDescGZIP(), []int{50} } func (x *GetIdentityKeyRequest) GetAddress() []byte { @@ -3520,7 +3622,7 @@ type GetIdentityKeyResponse struct { func (x *GetIdentityKeyResponse) Reset() { *x = GetIdentityKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[50] + mi := &file_global_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3533,7 +3635,7 @@ func (x *GetIdentityKeyResponse) String() string { func (*GetIdentityKeyResponse) ProtoMessage() {} func (x *GetIdentityKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[50] + mi := &file_global_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3546,7 +3648,7 @@ func (x *GetIdentityKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetIdentityKeyResponse.ProtoReflect.Descriptor instead. func (*GetIdentityKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{50} + return file_global_proto_rawDescGZIP(), []int{51} } func (x *GetIdentityKeyResponse) GetKey() *Ed448PublicKey { @@ -3574,7 +3676,7 @@ type GetProvingKeyRequest struct { func (x *GetProvingKeyRequest) Reset() { *x = GetProvingKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[51] + mi := &file_global_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3587,7 +3689,7 @@ func (x *GetProvingKeyRequest) String() string { func (*GetProvingKeyRequest) ProtoMessage() {} func (x *GetProvingKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[51] + mi := &file_global_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3600,7 +3702,7 @@ func (x *GetProvingKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvingKeyRequest.ProtoReflect.Descriptor instead. func (*GetProvingKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{51} + return file_global_proto_rawDescGZIP(), []int{52} } func (x *GetProvingKeyRequest) GetAddress() []byte { @@ -3622,7 +3724,7 @@ type GetProvingKeyResponse struct { func (x *GetProvingKeyResponse) Reset() { *x = GetProvingKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[52] + mi := &file_global_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3635,7 +3737,7 @@ func (x *GetProvingKeyResponse) String() string { func (*GetProvingKeyResponse) ProtoMessage() {} func (x *GetProvingKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[52] + mi := &file_global_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3648,7 +3750,7 @@ func (x *GetProvingKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvingKeyResponse.ProtoReflect.Descriptor instead. func (*GetProvingKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{52} + return file_global_proto_rawDescGZIP(), []int{53} } func (x *GetProvingKeyResponse) GetKey() *BLS48581SignatureWithProofOfPossession { @@ -3676,7 +3778,7 @@ type GetSignedKeyRequest struct { func (x *GetSignedKeyRequest) Reset() { *x = GetSignedKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[53] + mi := &file_global_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3689,7 +3791,7 @@ func (x *GetSignedKeyRequest) String() string { func (*GetSignedKeyRequest) ProtoMessage() {} func (x *GetSignedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[53] + mi := &file_global_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3702,7 +3804,7 @@ func (x *GetSignedKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeyRequest.ProtoReflect.Descriptor instead. func (*GetSignedKeyRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{53} + return file_global_proto_rawDescGZIP(), []int{54} } func (x *GetSignedKeyRequest) GetAddress() []byte { @@ -3724,7 +3826,7 @@ type GetSignedKeyResponse struct { func (x *GetSignedKeyResponse) Reset() { *x = GetSignedKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[54] + mi := &file_global_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3737,7 +3839,7 @@ func (x *GetSignedKeyResponse) String() string { func (*GetSignedKeyResponse) ProtoMessage() {} func (x *GetSignedKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[54] + mi := &file_global_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3750,7 +3852,7 @@ func (x *GetSignedKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeyResponse.ProtoReflect.Descriptor instead. func (*GetSignedKeyResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{54} + return file_global_proto_rawDescGZIP(), []int{55} } func (x *GetSignedKeyResponse) GetKey() *SignedX448Key { @@ -3779,7 +3881,7 @@ type GetSignedKeysByParentRequest struct { func (x *GetSignedKeysByParentRequest) Reset() { *x = GetSignedKeysByParentRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[55] + mi := &file_global_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3792,7 +3894,7 @@ func (x *GetSignedKeysByParentRequest) String() string { func (*GetSignedKeysByParentRequest) ProtoMessage() {} func (x *GetSignedKeysByParentRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[55] + mi := &file_global_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3805,7 +3907,7 @@ func (x *GetSignedKeysByParentRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeysByParentRequest.ProtoReflect.Descriptor instead. func (*GetSignedKeysByParentRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{55} + return file_global_proto_rawDescGZIP(), []int{56} } func (x *GetSignedKeysByParentRequest) GetParentKeyAddress() []byte { @@ -3834,7 +3936,7 @@ type GetSignedKeysByParentResponse struct { func (x *GetSignedKeysByParentResponse) Reset() { *x = GetSignedKeysByParentResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[56] + mi := &file_global_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3847,7 +3949,7 @@ func (x *GetSignedKeysByParentResponse) String() string { func (*GetSignedKeysByParentResponse) ProtoMessage() {} func (x *GetSignedKeysByParentResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[56] + mi := &file_global_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3860,7 +3962,7 @@ func (x *GetSignedKeysByParentResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSignedKeysByParentResponse.ProtoReflect.Descriptor instead. func (*GetSignedKeysByParentResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{56} + return file_global_proto_rawDescGZIP(), []int{57} } func (x *GetSignedKeysByParentResponse) GetKeys() []*SignedX448Key { @@ -3886,7 +3988,7 @@ type RangeProvingKeysRequest struct { func (x *RangeProvingKeysRequest) Reset() { *x = RangeProvingKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[57] + mi := &file_global_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3899,7 +4001,7 @@ func (x *RangeProvingKeysRequest) String() string { func (*RangeProvingKeysRequest) ProtoMessage() {} func (x *RangeProvingKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[57] + mi := &file_global_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3912,7 +4014,7 @@ func (x *RangeProvingKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeProvingKeysRequest.ProtoReflect.Descriptor instead. func (*RangeProvingKeysRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{57} + return file_global_proto_rawDescGZIP(), []int{58} } type RangeProvingKeysResponse struct { @@ -3927,7 +4029,7 @@ type RangeProvingKeysResponse struct { func (x *RangeProvingKeysResponse) Reset() { *x = RangeProvingKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[58] + mi := &file_global_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3940,7 +4042,7 @@ func (x *RangeProvingKeysResponse) String() string { func (*RangeProvingKeysResponse) ProtoMessage() {} func (x *RangeProvingKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[58] + mi := &file_global_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3953,7 +4055,7 @@ func (x *RangeProvingKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeProvingKeysResponse.ProtoReflect.Descriptor instead. func (*RangeProvingKeysResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{58} + return file_global_proto_rawDescGZIP(), []int{59} } func (x *RangeProvingKeysResponse) GetKey() *BLS48581SignatureWithProofOfPossession { @@ -3979,7 +4081,7 @@ type RangeIdentityKeysRequest struct { func (x *RangeIdentityKeysRequest) Reset() { *x = RangeIdentityKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[59] + mi := &file_global_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3992,7 +4094,7 @@ func (x *RangeIdentityKeysRequest) String() string { func (*RangeIdentityKeysRequest) ProtoMessage() {} func (x *RangeIdentityKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[59] + mi := &file_global_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4005,7 +4107,7 @@ func (x *RangeIdentityKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeIdentityKeysRequest.ProtoReflect.Descriptor instead. func (*RangeIdentityKeysRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{59} + return file_global_proto_rawDescGZIP(), []int{60} } type RangeIdentityKeysResponse struct { @@ -4020,7 +4122,7 @@ type RangeIdentityKeysResponse struct { func (x *RangeIdentityKeysResponse) Reset() { *x = RangeIdentityKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[60] + mi := &file_global_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4033,7 +4135,7 @@ func (x *RangeIdentityKeysResponse) String() string { func (*RangeIdentityKeysResponse) ProtoMessage() {} func (x *RangeIdentityKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[60] + mi := &file_global_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4046,7 +4148,7 @@ func (x *RangeIdentityKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeIdentityKeysResponse.ProtoReflect.Descriptor instead. func (*RangeIdentityKeysResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{60} + return file_global_proto_rawDescGZIP(), []int{61} } func (x *RangeIdentityKeysResponse) GetKey() *Ed448PublicKey { @@ -4075,7 +4177,7 @@ type RangeSignedKeysRequest struct { func (x *RangeSignedKeysRequest) Reset() { *x = RangeSignedKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[61] + mi := &file_global_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4088,7 +4190,7 @@ func (x *RangeSignedKeysRequest) String() string { func (*RangeSignedKeysRequest) ProtoMessage() {} func (x *RangeSignedKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[61] + mi := &file_global_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4101,7 +4203,7 @@ func (x *RangeSignedKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeSignedKeysRequest.ProtoReflect.Descriptor instead. func (*RangeSignedKeysRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{61} + return file_global_proto_rawDescGZIP(), []int{62} } func (x *RangeSignedKeysRequest) GetParentKeyAddress() []byte { @@ -4130,7 +4232,7 @@ type RangeSignedKeysResponse struct { func (x *RangeSignedKeysResponse) Reset() { *x = RangeSignedKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[62] + mi := &file_global_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4143,7 +4245,7 @@ func (x *RangeSignedKeysResponse) String() string { func (*RangeSignedKeysResponse) ProtoMessage() {} func (x *RangeSignedKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[62] + mi := &file_global_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4156,7 +4258,7 @@ func (x *RangeSignedKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RangeSignedKeysResponse.ProtoReflect.Descriptor instead. func (*RangeSignedKeysResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{62} + return file_global_proto_rawDescGZIP(), []int{63} } func (x *RangeSignedKeysResponse) GetKey() *SignedX448Key { @@ -4185,7 +4287,7 @@ type MessageKeyShard struct { func (x *MessageKeyShard) Reset() { *x = MessageKeyShard{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[63] + mi := &file_global_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4198,7 +4300,7 @@ func (x *MessageKeyShard) String() string { func (*MessageKeyShard) ProtoMessage() {} func (x *MessageKeyShard) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[63] + mi := &file_global_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4211,7 +4313,7 @@ func (x *MessageKeyShard) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageKeyShard.ProtoReflect.Descriptor instead. func (*MessageKeyShard) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{63} + return file_global_proto_rawDescGZIP(), []int{64} } func (x *MessageKeyShard) GetPartyIdentifier() uint32 { @@ -4246,7 +4348,7 @@ type PutMessageRequest struct { func (x *PutMessageRequest) Reset() { *x = PutMessageRequest{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[64] + mi := &file_global_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4259,7 +4361,7 @@ func (x *PutMessageRequest) String() string { func (*PutMessageRequest) ProtoMessage() {} func (x *PutMessageRequest) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[64] + mi := &file_global_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4272,7 +4374,7 @@ func (x *PutMessageRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutMessageRequest.ProtoReflect.Descriptor instead. func (*PutMessageRequest) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{64} + return file_global_proto_rawDescGZIP(), []int{65} } func (x *PutMessageRequest) GetMessageShards() []*MessageKeyShard { @@ -4305,7 +4407,7 @@ type PutMessageResponse struct { func (x *PutMessageResponse) Reset() { *x = PutMessageResponse{} if protoimpl.UnsafeEnabled { - mi := &file_global_proto_msgTypes[65] + mi := &file_global_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4318,7 +4420,7 @@ func (x *PutMessageResponse) String() string { func (*PutMessageResponse) ProtoMessage() {} func (x *PutMessageResponse) ProtoReflect() protoreflect.Message { - mi := &file_global_proto_msgTypes[65] + mi := &file_global_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4331,7 +4433,7 @@ func (x *PutMessageResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutMessageResponse.ProtoReflect.Descriptor instead. func (*PutMessageResponse) Descriptor() ([]byte, []int) { - return file_global_proto_rawDescGZIP(), []int{65} + return file_global_proto_rawDescGZIP(), []int{66} } var File_global_proto protoreflect.FileDescriptor @@ -4665,569 +4767,591 @@ var file_global_proto_rawDesc = []byte{ 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, - 0x31, 0x22, 0x8f, 0x02, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x76, 0x65, + 0x31, 0x22, 0xa3, 0x02, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x76, 0x0a, 0x1d, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, - 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x22, 0x92, 0x02, 0x0a, 0x09, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x56, 0x6f, 0x74, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, + 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, + 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0x8f, 0x02, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x70, + 0x6f, 0x73, 0x61, 0x6c, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, + 0x38, 0x31, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, + 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0x82, 0x02, 0x0a, 0x11, 0x51, 0x75, + 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x66, + 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x13, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, + 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x12, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xb3, + 0x02, 0x0a, 0x12, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x6e, + 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x6b, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, + 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x68, 0x0a, 0x19, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x71, + 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x17, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x51, 0x75, 0x6f, + 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x64, + 0x0a, 0x13, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, + 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x52, 0x12, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x22, 0x95, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x45, 0x0a, 0x0b, 0x47, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x3a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x69, 0x0a, 0x13, 0x47, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x54, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x72, - 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x70, 0x70, 0x72, 0x6f, - 0x76, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x76, 0x0a, 0x1d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x62, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, - 0x31, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x1a, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x42, 0x6c, 0x73, 0x34, 0x38, 0x35, 0x38, 0x31, 0x22, 0xee, 0x01, 0x0a, 0x11, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x13, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, - 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x12, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x47, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, - 0x44, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, - 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x45, 0x0a, - 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0x3a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, - 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x22, 0x69, 0x0a, 0x13, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, + 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x6d, 0x0a, 0x15, + 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x54, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, - 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x22, 0x6d, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x4a, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4b, 0x65, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x98, 0x01, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4b, + 0x65, 0x79, 0x22, 0x53, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x47, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6c, + 0x31, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x6c, + 0x32, 0x22, 0x4d, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, + 0x22, 0x63, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, + 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x22, 0x6e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, - 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x22, 0x4a, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x98, 0x01, 0x0a, - 0x0c, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, - 0x64, 0x61, 0x74, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x4b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3b, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x16, - 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x31, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x6c, 0x31, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x32, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x6c, 0x32, 0x22, 0x4d, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x63, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, - 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, - 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x4c, - 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x22, 0x6e, 0x0a, 0x1a, 0x47, 0x65, - 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1c, 0x0a, 0x1a, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, + 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xf8, 0x01, 0x0a, 0x1f, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, + 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, + 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x6d, 0x75, 0x6c, + 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, + 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x22, 0x73, + 0x0a, 0x1b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, + 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x65, - 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1c, 0x0a, 0x1a, 0x47, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf8, 0x01, 0x0a, 0x1f, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x17, 0x0a, 0x07, - 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, - 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, - 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, - 0x12, 0x36, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x15, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x22, 0x73, 0x0a, 0x1b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, - 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x52, - 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x22, 0x53, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, + 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x73, 0x22, 0x53, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, + 0x69, 0x72, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x69, + 0x72, 0x63, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x63, 0x0a, 0x0e, 0x52, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x69, 0x72, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x69, 0x72, 0x63, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, - 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x63, 0x0a, - 0x0e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x69, 0x72, 0x63, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x69, 0x72, 0x63, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, - 0x6c, 0x6c, 0x22, 0x49, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x70, 0x0a, - 0x16, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, - 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x4d, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x72, - 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x78, - 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x40, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x7d, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4a, 0x0a, 0x0c, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, - 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x22, 0x2e, 0x0a, 0x16, 0x50, 0x75, 0x74, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x78, 0x0a, 0x14, 0x50, 0x75, 0x74, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x60, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, - 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, - 0x79, 0x22, 0x2d, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, - 0x14, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x2e, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x4f, 0x0a, 0x25, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x20, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x4f, 0x66, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, - 0x12, 0x4f, 0x0a, 0x25, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x4f, 0x66, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, - 0x79, 0x22, 0x31, 0x0a, 0x19, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x69, 0x0a, 0x13, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, - 0x2c, 0x0a, 0x14, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x31, 0x0a, - 0x15, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x49, 0x0a, + 0x15, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, + 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x70, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4d, 0x0a, 0x1d, 0x47, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, + 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, + 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x4b, + 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x78, 0x0a, 0x1e, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x7d, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4a, 0x0a, 0x0c, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, + 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, + 0x65, 0x79, 0x22, 0x2e, 0x0a, 0x16, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x78, 0x0a, 0x14, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, 0x0b, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, + 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x22, 0x2d, 0x0a, 0x15, + 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9e, 0x02, 0x0a, 0x18, + 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, + 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4f, 0x0a, 0x25, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4f, + 0x66, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x25, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x20, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4f, + 0x66, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x22, 0x31, 0x0a, 0x19, + 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, + 0x69, 0x0a, 0x13, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x22, 0x69, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x80, 0x01, - 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, - 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x22, 0x2f, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x22, 0x66, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6d, 0x0a, 0x1c, 0x47, 0x65, 0x74, - 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, - 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, - 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x22, 0x71, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, - 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x19, 0x0a, 0x17, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, - 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, - 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x1a, 0x0a, 0x18, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6c, 0x0a, 0x19, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, + 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x2c, 0x0a, 0x14, 0x50, 0x75, + 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x31, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x69, 0x0a, 0x16, 0x47, + 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x16, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x22, - 0x69, 0x0a, 0x17, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, - 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x61, 0x0a, 0x0f, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x29, 0x0a, - 0x10, 0x70, 0x61, 0x72, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0c, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x22, 0xb2, 0x01, - 0x0a, 0x11, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4b, - 0x65, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x30, 0x0a, 0x14, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, - 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf2, 0x04, 0x0a, 0x0d, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x72, 0x0a, 0x0e, 0x47, 0x65, - 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x2e, 0x71, - 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, - 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2e, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x78, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x12, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, - 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x47, 0x65, - 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x12, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, - 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x35, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8b, 0x01, - 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x78, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x70, 0x0a, 0x0c, 0x4f, - 0x6e, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x60, 0x0a, 0x07, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x29, - 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, - 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, 0xdf, 0x01, - 0x0a, 0x0d, 0x4d, 0x69, 0x78, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x69, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x2e, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x51, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, + 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x2f, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x66, 0x0a, 0x14, + 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, + 0x6f, 0x73, 0x65, 0x22, 0x71, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, + 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x4f, 0x66, 0x50, 0x6f, 0x73, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x6c, 0x0a, 0x19, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x39, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0b, 0x52, 0x6f, - 0x75, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x1a, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, - 0xd7, 0x0c, 0x0a, 0x12, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x38, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, - 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, - 0x0e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, - 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, - 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x0d, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x64, 0x34, 0x34, 0x38, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x67, 0x0a, 0x16, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4b, + 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, + 0x5f, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x17, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x58, 0x34, 0x34, 0x38, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x61, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x11, 0x50, 0x75, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, + 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x11, 0x50, 0x75, 0x74, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x33, 0x2e, + 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x65, + 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x70, 0x68, 0x65, 0x6d, + 0x65, 0x72, 0x61, 0x6c, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x14, 0x0a, + 0x12, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x32, 0xf2, 0x04, 0x0a, 0x0d, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x72, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, + 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x31, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x34, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x36, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8b, 0x01, 0x0a, 0x0f, 0x41, 0x70, 0x70, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, + 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x41, 0x70, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x70, 0x0a, 0x0c, 0x4f, 0x6e, 0x69, 0x6f, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x60, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x12, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, + 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, 0xdf, 0x01, 0x0a, 0x0d, 0x4d, 0x69, 0x78, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x0a, 0x50, 0x75, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, + 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x27, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, 0xd7, 0x0c, 0x0a, 0x12, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, + 0x76, 0x65, 0x72, 0x12, 0x38, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, + 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x42, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x50, 0x75, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x72, 0x0a, 0x0d, 0x50, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, + 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x11, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x72, 0x6f, - 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, - 0x75, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x50, 0x75, 0x74, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, - 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x72, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, - 0x79, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, - 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x73, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, - 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x0d, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x2e, 0x71, + 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x6f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, + 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x8a, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, + 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7e, 0x0a, 0x11, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, + 0x10, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, + 0x73, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x4b, 0x65, + 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x11, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4b, 0x65, - 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x78, 0x0a, 0x0f, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x12, 0x31, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xec, 0x03, 0x0a, 0x0f, 0x44, 0x69, - 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x56, 0x0a, - 0x0f, 0x50, 0x75, 0x74, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, - 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x75, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x62, 0x6f, - 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, - 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x06, - 0x50, 0x75, 0x74, 0x48, 0x75, 0x62, 0x12, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x59, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x48, 0x75, 0x62, 0x12, 0x26, 0x2e, 0x71, - 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x0f, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x31, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xec, 0x03, 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x49, + 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x12, 0x75, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, - 0x62, 0x2e, 0x48, 0x75, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, - 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, - 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x50, 0x75, 0x74, 0x48, 0x75, + 0x62, 0x12, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, + 0x75, 0x62, 0x50, 0x75, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x59, 0x0a, + 0x06, 0x47, 0x65, 0x74, 0x48, 0x75, 0x62, 0x12, 0x26, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x75, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, + 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, + 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x44, + 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, + 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5242,7 +5366,7 @@ func file_global_proto_rawDescGZIP() []byte { return file_global_proto_rawDescData } -var file_global_proto_msgTypes = make([]protoimpl.MessageInfo, 66) +var file_global_proto_msgTypes = make([]protoimpl.MessageInfo, 67) var file_global_proto_goTypes = []interface{}{ (*LegacyProverRequest)(nil), // 0: quilibrium.node.global.pb.LegacyProverRequest (*SeniorityMerge)(nil), // 1: quilibrium.node.global.pb.SeniorityMerge @@ -5259,215 +5383,218 @@ var file_global_proto_goTypes = []interface{}{ (*GlobalFrameHeader)(nil), // 12: quilibrium.node.global.pb.GlobalFrameHeader (*FrameHeader)(nil), // 13: quilibrium.node.global.pb.FrameHeader (*ProverLivenessCheck)(nil), // 14: quilibrium.node.global.pb.ProverLivenessCheck - (*FrameVote)(nil), // 15: quilibrium.node.global.pb.FrameVote - (*FrameConfirmation)(nil), // 16: quilibrium.node.global.pb.FrameConfirmation - (*GlobalFrame)(nil), // 17: quilibrium.node.global.pb.GlobalFrame - (*AppShardFrame)(nil), // 18: quilibrium.node.global.pb.AppShardFrame - (*GlobalAlert)(nil), // 19: quilibrium.node.global.pb.GlobalAlert - (*GetGlobalFrameRequest)(nil), // 20: quilibrium.node.global.pb.GetGlobalFrameRequest - (*GlobalFrameResponse)(nil), // 21: quilibrium.node.global.pb.GlobalFrameResponse - (*GetAppShardFrameRequest)(nil), // 22: quilibrium.node.global.pb.GetAppShardFrameRequest - (*AppShardFrameResponse)(nil), // 23: quilibrium.node.global.pb.AppShardFrameResponse - (*GetAppShardsRequest)(nil), // 24: quilibrium.node.global.pb.GetAppShardsRequest - (*AppShardInfo)(nil), // 25: quilibrium.node.global.pb.AppShardInfo - (*GetAppShardsResponse)(nil), // 26: quilibrium.node.global.pb.GetAppShardsResponse - (*GetGlobalShardsRequest)(nil), // 27: quilibrium.node.global.pb.GetGlobalShardsRequest - (*GetGlobalShardsResponse)(nil), // 28: quilibrium.node.global.pb.GetGlobalShardsResponse - (*GetLockedAddressesRequest)(nil), // 29: quilibrium.node.global.pb.GetLockedAddressesRequest - (*LockedTransaction)(nil), // 30: quilibrium.node.global.pb.LockedTransaction - (*GetLockedAddressesResponse)(nil), // 31: quilibrium.node.global.pb.GetLockedAddressesResponse - (*GlobalGetWorkerInfoRequest)(nil), // 32: quilibrium.node.global.pb.GlobalGetWorkerInfoRequest - (*GlobalGetWorkerInfoResponseItem)(nil), // 33: quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem - (*GlobalGetWorkerInfoResponse)(nil), // 34: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse - (*SendMessage)(nil), // 35: quilibrium.node.global.pb.SendMessage - (*ReceiveMessage)(nil), // 36: quilibrium.node.global.pb.ReceiveMessage - (*GetKeyRegistryRequest)(nil), // 37: quilibrium.node.global.pb.GetKeyRegistryRequest - (*GetKeyRegistryResponse)(nil), // 38: quilibrium.node.global.pb.GetKeyRegistryResponse - (*GetKeyRegistryByProverRequest)(nil), // 39: quilibrium.node.global.pb.GetKeyRegistryByProverRequest - (*GetKeyRegistryByProverResponse)(nil), // 40: quilibrium.node.global.pb.GetKeyRegistryByProverResponse - (*PutIdentityKeyRequest)(nil), // 41: quilibrium.node.global.pb.PutIdentityKeyRequest - (*PutIdentityKeyResponse)(nil), // 42: quilibrium.node.global.pb.PutIdentityKeyResponse - (*PutProvingKeyRequest)(nil), // 43: quilibrium.node.global.pb.PutProvingKeyRequest - (*PutProvingKeyResponse)(nil), // 44: quilibrium.node.global.pb.PutProvingKeyResponse - (*PutCrossSignatureRequest)(nil), // 45: quilibrium.node.global.pb.PutCrossSignatureRequest - (*PutCrossSignatureResponse)(nil), // 46: quilibrium.node.global.pb.PutCrossSignatureResponse - (*PutSignedKeyRequest)(nil), // 47: quilibrium.node.global.pb.PutSignedKeyRequest - (*PutSignedKeyResponse)(nil), // 48: quilibrium.node.global.pb.PutSignedKeyResponse - (*GetIdentityKeyRequest)(nil), // 49: quilibrium.node.global.pb.GetIdentityKeyRequest - (*GetIdentityKeyResponse)(nil), // 50: quilibrium.node.global.pb.GetIdentityKeyResponse - (*GetProvingKeyRequest)(nil), // 51: quilibrium.node.global.pb.GetProvingKeyRequest - (*GetProvingKeyResponse)(nil), // 52: quilibrium.node.global.pb.GetProvingKeyResponse - (*GetSignedKeyRequest)(nil), // 53: quilibrium.node.global.pb.GetSignedKeyRequest - (*GetSignedKeyResponse)(nil), // 54: quilibrium.node.global.pb.GetSignedKeyResponse - (*GetSignedKeysByParentRequest)(nil), // 55: quilibrium.node.global.pb.GetSignedKeysByParentRequest - (*GetSignedKeysByParentResponse)(nil), // 56: quilibrium.node.global.pb.GetSignedKeysByParentResponse - (*RangeProvingKeysRequest)(nil), // 57: quilibrium.node.global.pb.RangeProvingKeysRequest - (*RangeProvingKeysResponse)(nil), // 58: quilibrium.node.global.pb.RangeProvingKeysResponse - (*RangeIdentityKeysRequest)(nil), // 59: quilibrium.node.global.pb.RangeIdentityKeysRequest - (*RangeIdentityKeysResponse)(nil), // 60: quilibrium.node.global.pb.RangeIdentityKeysResponse - (*RangeSignedKeysRequest)(nil), // 61: quilibrium.node.global.pb.RangeSignedKeysRequest - (*RangeSignedKeysResponse)(nil), // 62: quilibrium.node.global.pb.RangeSignedKeysResponse - (*MessageKeyShard)(nil), // 63: quilibrium.node.global.pb.MessageKeyShard - (*PutMessageRequest)(nil), // 64: quilibrium.node.global.pb.PutMessageRequest - (*PutMessageResponse)(nil), // 65: quilibrium.node.global.pb.PutMessageResponse - (*Ed448Signature)(nil), // 66: quilibrium.node.keys.pb.Ed448Signature - (*BLS48581SignatureWithProofOfPossession)(nil), // 67: quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - (*BLS48581AddressedSignature)(nil), // 68: quilibrium.node.keys.pb.BLS48581AddressedSignature - (*TraversalProof)(nil), // 69: quilibrium.node.application.pb.TraversalProof - (*TokenDeploy)(nil), // 70: quilibrium.node.token.pb.TokenDeploy - (*TokenUpdate)(nil), // 71: quilibrium.node.token.pb.TokenUpdate - (*Transaction)(nil), // 72: quilibrium.node.token.pb.Transaction - (*PendingTransaction)(nil), // 73: quilibrium.node.token.pb.PendingTransaction - (*MintTransaction)(nil), // 74: quilibrium.node.token.pb.MintTransaction - (*HypergraphDeploy)(nil), // 75: quilibrium.node.hypergraph.pb.HypergraphDeploy - (*HypergraphUpdate)(nil), // 76: quilibrium.node.hypergraph.pb.HypergraphUpdate - (*VertexAdd)(nil), // 77: quilibrium.node.hypergraph.pb.VertexAdd - (*VertexRemove)(nil), // 78: quilibrium.node.hypergraph.pb.VertexRemove - (*HyperedgeAdd)(nil), // 79: quilibrium.node.hypergraph.pb.HyperedgeAdd - (*HyperedgeRemove)(nil), // 80: quilibrium.node.hypergraph.pb.HyperedgeRemove - (*ComputeDeploy)(nil), // 81: quilibrium.node.compute.pb.ComputeDeploy - (*ComputeUpdate)(nil), // 82: quilibrium.node.compute.pb.ComputeUpdate - (*CodeDeployment)(nil), // 83: quilibrium.node.compute.pb.CodeDeployment - (*CodeExecute)(nil), // 84: quilibrium.node.compute.pb.CodeExecute - (*CodeFinalize)(nil), // 85: quilibrium.node.compute.pb.CodeFinalize - (*BLS48581AggregateSignature)(nil), // 86: quilibrium.node.keys.pb.BLS48581AggregateSignature - (*KeyRegistry)(nil), // 87: quilibrium.node.keys.pb.KeyRegistry - (*Ed448PublicKey)(nil), // 88: quilibrium.node.keys.pb.Ed448PublicKey - (*SignedX448Key)(nil), // 89: quilibrium.node.keys.pb.SignedX448Key - (*Message)(nil), // 90: quilibrium.node.application.pb.Message - (*InboxMessagePut)(nil), // 91: quilibrium.node.channel.pb.InboxMessagePut - (*InboxMessageRequest)(nil), // 92: quilibrium.node.channel.pb.InboxMessageRequest - (*HubPut)(nil), // 93: quilibrium.node.channel.pb.HubPut - (*HubRequest)(nil), // 94: quilibrium.node.channel.pb.HubRequest - (*DispatchSyncRequest)(nil), // 95: quilibrium.node.channel.pb.DispatchSyncRequest - (*emptypb.Empty)(nil), // 96: google.protobuf.Empty - (*InboxMessageResponse)(nil), // 97: quilibrium.node.channel.pb.InboxMessageResponse - (*HubResponse)(nil), // 98: quilibrium.node.channel.pb.HubResponse - (*DispatchSyncResponse)(nil), // 99: quilibrium.node.channel.pb.DispatchSyncResponse + (*ProposalVote)(nil), // 15: quilibrium.node.global.pb.ProposalVote + (*QuorumCertificate)(nil), // 16: quilibrium.node.global.pb.QuorumCertificate + (*TimeoutCertificate)(nil), // 17: quilibrium.node.global.pb.TimeoutCertificate + (*GlobalFrame)(nil), // 18: quilibrium.node.global.pb.GlobalFrame + (*AppShardFrame)(nil), // 19: quilibrium.node.global.pb.AppShardFrame + (*GlobalAlert)(nil), // 20: quilibrium.node.global.pb.GlobalAlert + (*GetGlobalFrameRequest)(nil), // 21: quilibrium.node.global.pb.GetGlobalFrameRequest + (*GlobalFrameResponse)(nil), // 22: quilibrium.node.global.pb.GlobalFrameResponse + (*GetAppShardFrameRequest)(nil), // 23: quilibrium.node.global.pb.GetAppShardFrameRequest + (*AppShardFrameResponse)(nil), // 24: quilibrium.node.global.pb.AppShardFrameResponse + (*GetAppShardsRequest)(nil), // 25: quilibrium.node.global.pb.GetAppShardsRequest + (*AppShardInfo)(nil), // 26: quilibrium.node.global.pb.AppShardInfo + (*GetAppShardsResponse)(nil), // 27: quilibrium.node.global.pb.GetAppShardsResponse + (*GetGlobalShardsRequest)(nil), // 28: quilibrium.node.global.pb.GetGlobalShardsRequest + (*GetGlobalShardsResponse)(nil), // 29: quilibrium.node.global.pb.GetGlobalShardsResponse + (*GetLockedAddressesRequest)(nil), // 30: quilibrium.node.global.pb.GetLockedAddressesRequest + (*LockedTransaction)(nil), // 31: quilibrium.node.global.pb.LockedTransaction + (*GetLockedAddressesResponse)(nil), // 32: quilibrium.node.global.pb.GetLockedAddressesResponse + (*GlobalGetWorkerInfoRequest)(nil), // 33: quilibrium.node.global.pb.GlobalGetWorkerInfoRequest + (*GlobalGetWorkerInfoResponseItem)(nil), // 34: quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem + (*GlobalGetWorkerInfoResponse)(nil), // 35: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse + (*SendMessage)(nil), // 36: quilibrium.node.global.pb.SendMessage + (*ReceiveMessage)(nil), // 37: quilibrium.node.global.pb.ReceiveMessage + (*GetKeyRegistryRequest)(nil), // 38: quilibrium.node.global.pb.GetKeyRegistryRequest + (*GetKeyRegistryResponse)(nil), // 39: quilibrium.node.global.pb.GetKeyRegistryResponse + (*GetKeyRegistryByProverRequest)(nil), // 40: quilibrium.node.global.pb.GetKeyRegistryByProverRequest + (*GetKeyRegistryByProverResponse)(nil), // 41: quilibrium.node.global.pb.GetKeyRegistryByProverResponse + (*PutIdentityKeyRequest)(nil), // 42: quilibrium.node.global.pb.PutIdentityKeyRequest + (*PutIdentityKeyResponse)(nil), // 43: quilibrium.node.global.pb.PutIdentityKeyResponse + (*PutProvingKeyRequest)(nil), // 44: quilibrium.node.global.pb.PutProvingKeyRequest + (*PutProvingKeyResponse)(nil), // 45: quilibrium.node.global.pb.PutProvingKeyResponse + (*PutCrossSignatureRequest)(nil), // 46: quilibrium.node.global.pb.PutCrossSignatureRequest + (*PutCrossSignatureResponse)(nil), // 47: quilibrium.node.global.pb.PutCrossSignatureResponse + (*PutSignedKeyRequest)(nil), // 48: quilibrium.node.global.pb.PutSignedKeyRequest + (*PutSignedKeyResponse)(nil), // 49: quilibrium.node.global.pb.PutSignedKeyResponse + (*GetIdentityKeyRequest)(nil), // 50: quilibrium.node.global.pb.GetIdentityKeyRequest + (*GetIdentityKeyResponse)(nil), // 51: quilibrium.node.global.pb.GetIdentityKeyResponse + (*GetProvingKeyRequest)(nil), // 52: quilibrium.node.global.pb.GetProvingKeyRequest + (*GetProvingKeyResponse)(nil), // 53: quilibrium.node.global.pb.GetProvingKeyResponse + (*GetSignedKeyRequest)(nil), // 54: quilibrium.node.global.pb.GetSignedKeyRequest + (*GetSignedKeyResponse)(nil), // 55: quilibrium.node.global.pb.GetSignedKeyResponse + (*GetSignedKeysByParentRequest)(nil), // 56: quilibrium.node.global.pb.GetSignedKeysByParentRequest + (*GetSignedKeysByParentResponse)(nil), // 57: quilibrium.node.global.pb.GetSignedKeysByParentResponse + (*RangeProvingKeysRequest)(nil), // 58: quilibrium.node.global.pb.RangeProvingKeysRequest + (*RangeProvingKeysResponse)(nil), // 59: quilibrium.node.global.pb.RangeProvingKeysResponse + (*RangeIdentityKeysRequest)(nil), // 60: quilibrium.node.global.pb.RangeIdentityKeysRequest + (*RangeIdentityKeysResponse)(nil), // 61: quilibrium.node.global.pb.RangeIdentityKeysResponse + (*RangeSignedKeysRequest)(nil), // 62: quilibrium.node.global.pb.RangeSignedKeysRequest + (*RangeSignedKeysResponse)(nil), // 63: quilibrium.node.global.pb.RangeSignedKeysResponse + (*MessageKeyShard)(nil), // 64: quilibrium.node.global.pb.MessageKeyShard + (*PutMessageRequest)(nil), // 65: quilibrium.node.global.pb.PutMessageRequest + (*PutMessageResponse)(nil), // 66: quilibrium.node.global.pb.PutMessageResponse + (*Ed448Signature)(nil), // 67: quilibrium.node.keys.pb.Ed448Signature + (*BLS48581SignatureWithProofOfPossession)(nil), // 68: quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + (*BLS48581AddressedSignature)(nil), // 69: quilibrium.node.keys.pb.BLS48581AddressedSignature + (*TraversalProof)(nil), // 70: quilibrium.node.application.pb.TraversalProof + (*TokenDeploy)(nil), // 71: quilibrium.node.token.pb.TokenDeploy + (*TokenUpdate)(nil), // 72: quilibrium.node.token.pb.TokenUpdate + (*Transaction)(nil), // 73: quilibrium.node.token.pb.Transaction + (*PendingTransaction)(nil), // 74: quilibrium.node.token.pb.PendingTransaction + (*MintTransaction)(nil), // 75: quilibrium.node.token.pb.MintTransaction + (*HypergraphDeploy)(nil), // 76: quilibrium.node.hypergraph.pb.HypergraphDeploy + (*HypergraphUpdate)(nil), // 77: quilibrium.node.hypergraph.pb.HypergraphUpdate + (*VertexAdd)(nil), // 78: quilibrium.node.hypergraph.pb.VertexAdd + (*VertexRemove)(nil), // 79: quilibrium.node.hypergraph.pb.VertexRemove + (*HyperedgeAdd)(nil), // 80: quilibrium.node.hypergraph.pb.HyperedgeAdd + (*HyperedgeRemove)(nil), // 81: quilibrium.node.hypergraph.pb.HyperedgeRemove + (*ComputeDeploy)(nil), // 82: quilibrium.node.compute.pb.ComputeDeploy + (*ComputeUpdate)(nil), // 83: quilibrium.node.compute.pb.ComputeUpdate + (*CodeDeployment)(nil), // 84: quilibrium.node.compute.pb.CodeDeployment + (*CodeExecute)(nil), // 85: quilibrium.node.compute.pb.CodeExecute + (*CodeFinalize)(nil), // 86: quilibrium.node.compute.pb.CodeFinalize + (*BLS48581AggregateSignature)(nil), // 87: quilibrium.node.keys.pb.BLS48581AggregateSignature + (*KeyRegistry)(nil), // 88: quilibrium.node.keys.pb.KeyRegistry + (*Ed448PublicKey)(nil), // 89: quilibrium.node.keys.pb.Ed448PublicKey + (*SignedX448Key)(nil), // 90: quilibrium.node.keys.pb.SignedX448Key + (*Message)(nil), // 91: quilibrium.node.application.pb.Message + (*InboxMessagePut)(nil), // 92: quilibrium.node.channel.pb.InboxMessagePut + (*InboxMessageRequest)(nil), // 93: quilibrium.node.channel.pb.InboxMessageRequest + (*HubPut)(nil), // 94: quilibrium.node.channel.pb.HubPut + (*HubRequest)(nil), // 95: quilibrium.node.channel.pb.HubRequest + (*DispatchSyncRequest)(nil), // 96: quilibrium.node.channel.pb.DispatchSyncRequest + (*emptypb.Empty)(nil), // 97: google.protobuf.Empty + (*InboxMessageResponse)(nil), // 98: quilibrium.node.channel.pb.InboxMessageResponse + (*HubResponse)(nil), // 99: quilibrium.node.channel.pb.HubResponse + (*DispatchSyncResponse)(nil), // 100: quilibrium.node.channel.pb.DispatchSyncResponse } var file_global_proto_depIdxs = []int32{ - 66, // 0: quilibrium.node.global.pb.LegacyProverRequest.public_key_signatures_ed448:type_name -> quilibrium.node.keys.pb.Ed448Signature - 67, // 1: quilibrium.node.global.pb.ProverJoin.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 1, // 2: quilibrium.node.global.pb.ProverJoin.merge_targets:type_name -> quilibrium.node.global.pb.SeniorityMerge - 68, // 3: quilibrium.node.global.pb.ProverLeave.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 4: quilibrium.node.global.pb.ProverPause.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 5: quilibrium.node.global.pb.ProverResume.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 6: quilibrium.node.global.pb.ProverConfirm.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 7: quilibrium.node.global.pb.ProverUpdate.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 69, // 8: quilibrium.node.global.pb.ProverKick.traversal_proof:type_name -> quilibrium.node.application.pb.TraversalProof - 68, // 9: quilibrium.node.global.pb.ProverReject.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 2, // 10: quilibrium.node.global.pb.MessageRequest.join:type_name -> quilibrium.node.global.pb.ProverJoin - 3, // 11: quilibrium.node.global.pb.MessageRequest.leave:type_name -> quilibrium.node.global.pb.ProverLeave - 4, // 12: quilibrium.node.global.pb.MessageRequest.pause:type_name -> quilibrium.node.global.pb.ProverPause - 5, // 13: quilibrium.node.global.pb.MessageRequest.resume:type_name -> quilibrium.node.global.pb.ProverResume - 6, // 14: quilibrium.node.global.pb.MessageRequest.confirm:type_name -> quilibrium.node.global.pb.ProverConfirm - 9, // 15: quilibrium.node.global.pb.MessageRequest.reject:type_name -> quilibrium.node.global.pb.ProverReject - 8, // 16: quilibrium.node.global.pb.MessageRequest.kick:type_name -> quilibrium.node.global.pb.ProverKick - 7, // 17: quilibrium.node.global.pb.MessageRequest.update:type_name -> quilibrium.node.global.pb.ProverUpdate - 70, // 18: quilibrium.node.global.pb.MessageRequest.token_deploy:type_name -> quilibrium.node.token.pb.TokenDeploy - 71, // 19: quilibrium.node.global.pb.MessageRequest.token_update:type_name -> quilibrium.node.token.pb.TokenUpdate - 72, // 20: quilibrium.node.global.pb.MessageRequest.transaction:type_name -> quilibrium.node.token.pb.Transaction - 73, // 21: quilibrium.node.global.pb.MessageRequest.pending_transaction:type_name -> quilibrium.node.token.pb.PendingTransaction - 74, // 22: quilibrium.node.global.pb.MessageRequest.mint_transaction:type_name -> quilibrium.node.token.pb.MintTransaction - 75, // 23: quilibrium.node.global.pb.MessageRequest.hypergraph_deploy:type_name -> quilibrium.node.hypergraph.pb.HypergraphDeploy - 76, // 24: quilibrium.node.global.pb.MessageRequest.hypergraph_update:type_name -> quilibrium.node.hypergraph.pb.HypergraphUpdate - 77, // 25: quilibrium.node.global.pb.MessageRequest.vertex_add:type_name -> quilibrium.node.hypergraph.pb.VertexAdd - 78, // 26: quilibrium.node.global.pb.MessageRequest.vertex_remove:type_name -> quilibrium.node.hypergraph.pb.VertexRemove - 79, // 27: quilibrium.node.global.pb.MessageRequest.hyperedge_add:type_name -> quilibrium.node.hypergraph.pb.HyperedgeAdd - 80, // 28: quilibrium.node.global.pb.MessageRequest.hyperedge_remove:type_name -> quilibrium.node.hypergraph.pb.HyperedgeRemove - 81, // 29: quilibrium.node.global.pb.MessageRequest.compute_deploy:type_name -> quilibrium.node.compute.pb.ComputeDeploy - 82, // 30: quilibrium.node.global.pb.MessageRequest.compute_update:type_name -> quilibrium.node.compute.pb.ComputeUpdate - 83, // 31: quilibrium.node.global.pb.MessageRequest.code_deploy:type_name -> quilibrium.node.compute.pb.CodeDeployment - 84, // 32: quilibrium.node.global.pb.MessageRequest.code_execute:type_name -> quilibrium.node.compute.pb.CodeExecute - 85, // 33: quilibrium.node.global.pb.MessageRequest.code_finalize:type_name -> quilibrium.node.compute.pb.CodeFinalize - 13, // 34: quilibrium.node.global.pb.MessageRequest.shard:type_name -> quilibrium.node.global.pb.FrameHeader - 10, // 35: quilibrium.node.global.pb.MessageBundle.requests:type_name -> quilibrium.node.global.pb.MessageRequest - 86, // 36: quilibrium.node.global.pb.GlobalFrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature - 86, // 37: quilibrium.node.global.pb.FrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature - 68, // 38: quilibrium.node.global.pb.ProverLivenessCheck.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 68, // 39: quilibrium.node.global.pb.FrameVote.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature - 86, // 40: quilibrium.node.global.pb.FrameConfirmation.aggregate_signature:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature - 12, // 41: quilibrium.node.global.pb.GlobalFrame.header:type_name -> quilibrium.node.global.pb.GlobalFrameHeader - 11, // 42: quilibrium.node.global.pb.GlobalFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle - 13, // 43: quilibrium.node.global.pb.AppShardFrame.header:type_name -> quilibrium.node.global.pb.FrameHeader - 11, // 44: quilibrium.node.global.pb.AppShardFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle - 17, // 45: quilibrium.node.global.pb.GlobalFrameResponse.frame:type_name -> quilibrium.node.global.pb.GlobalFrame - 18, // 46: quilibrium.node.global.pb.AppShardFrameResponse.frame:type_name -> quilibrium.node.global.pb.AppShardFrame - 25, // 47: quilibrium.node.global.pb.GetAppShardsResponse.info:type_name -> quilibrium.node.global.pb.AppShardInfo - 30, // 48: quilibrium.node.global.pb.GetLockedAddressesResponse.transactions:type_name -> quilibrium.node.global.pb.LockedTransaction - 33, // 49: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse.workers:type_name -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem - 87, // 50: quilibrium.node.global.pb.GetKeyRegistryResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry - 87, // 51: quilibrium.node.global.pb.GetKeyRegistryByProverResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry - 88, // 52: quilibrium.node.global.pb.PutIdentityKeyRequest.identity_key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey - 67, // 53: quilibrium.node.global.pb.PutProvingKeyRequest.proving_key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 89, // 54: quilibrium.node.global.pb.PutSignedKeyRequest.key:type_name -> quilibrium.node.keys.pb.SignedX448Key - 88, // 55: quilibrium.node.global.pb.GetIdentityKeyResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey - 67, // 56: quilibrium.node.global.pb.GetProvingKeyResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 89, // 57: quilibrium.node.global.pb.GetSignedKeyResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key - 89, // 58: quilibrium.node.global.pb.GetSignedKeysByParentResponse.keys:type_name -> quilibrium.node.keys.pb.SignedX448Key - 67, // 59: quilibrium.node.global.pb.RangeProvingKeysResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession - 88, // 60: quilibrium.node.global.pb.RangeIdentityKeysResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey - 89, // 61: quilibrium.node.global.pb.RangeSignedKeysResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key - 63, // 62: quilibrium.node.global.pb.PutMessageRequest.message_shards:type_name -> quilibrium.node.global.pb.MessageKeyShard - 20, // 63: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:input_type -> quilibrium.node.global.pb.GetGlobalFrameRequest - 24, // 64: quilibrium.node.global.pb.GlobalService.GetAppShards:input_type -> quilibrium.node.global.pb.GetAppShardsRequest - 27, // 65: quilibrium.node.global.pb.GlobalService.GetGlobalShards:input_type -> quilibrium.node.global.pb.GetGlobalShardsRequest - 29, // 66: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:input_type -> quilibrium.node.global.pb.GetLockedAddressesRequest - 32, // 67: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:input_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoRequest - 22, // 68: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:input_type -> quilibrium.node.global.pb.GetAppShardFrameRequest - 35, // 69: quilibrium.node.global.pb.OnionService.Connect:input_type -> quilibrium.node.global.pb.SendMessage - 64, // 70: quilibrium.node.global.pb.MixnetService.PutMessage:input_type -> quilibrium.node.global.pb.PutMessageRequest - 90, // 71: quilibrium.node.global.pb.MixnetService.RoundStream:input_type -> quilibrium.node.application.pb.Message - 37, // 72: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:input_type -> quilibrium.node.global.pb.GetKeyRegistryRequest - 39, // 73: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:input_type -> quilibrium.node.global.pb.GetKeyRegistryByProverRequest - 41, // 74: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:input_type -> quilibrium.node.global.pb.PutIdentityKeyRequest - 43, // 75: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:input_type -> quilibrium.node.global.pb.PutProvingKeyRequest - 45, // 76: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:input_type -> quilibrium.node.global.pb.PutCrossSignatureRequest - 47, // 77: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:input_type -> quilibrium.node.global.pb.PutSignedKeyRequest - 49, // 78: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:input_type -> quilibrium.node.global.pb.GetIdentityKeyRequest - 51, // 79: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:input_type -> quilibrium.node.global.pb.GetProvingKeyRequest - 53, // 80: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:input_type -> quilibrium.node.global.pb.GetSignedKeyRequest - 55, // 81: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:input_type -> quilibrium.node.global.pb.GetSignedKeysByParentRequest - 57, // 82: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:input_type -> quilibrium.node.global.pb.RangeProvingKeysRequest - 59, // 83: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:input_type -> quilibrium.node.global.pb.RangeIdentityKeysRequest - 61, // 84: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:input_type -> quilibrium.node.global.pb.RangeSignedKeysRequest - 91, // 85: quilibrium.node.global.pb.DispatchService.PutInboxMessage:input_type -> quilibrium.node.channel.pb.InboxMessagePut - 92, // 86: quilibrium.node.global.pb.DispatchService.GetInboxMessages:input_type -> quilibrium.node.channel.pb.InboxMessageRequest - 93, // 87: quilibrium.node.global.pb.DispatchService.PutHub:input_type -> quilibrium.node.channel.pb.HubPut - 94, // 88: quilibrium.node.global.pb.DispatchService.GetHub:input_type -> quilibrium.node.channel.pb.HubRequest - 95, // 89: quilibrium.node.global.pb.DispatchService.Sync:input_type -> quilibrium.node.channel.pb.DispatchSyncRequest - 21, // 90: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:output_type -> quilibrium.node.global.pb.GlobalFrameResponse - 26, // 91: quilibrium.node.global.pb.GlobalService.GetAppShards:output_type -> quilibrium.node.global.pb.GetAppShardsResponse - 28, // 92: quilibrium.node.global.pb.GlobalService.GetGlobalShards:output_type -> quilibrium.node.global.pb.GetGlobalShardsResponse - 31, // 93: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:output_type -> quilibrium.node.global.pb.GetLockedAddressesResponse - 34, // 94: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:output_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponse - 23, // 95: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:output_type -> quilibrium.node.global.pb.AppShardFrameResponse - 36, // 96: quilibrium.node.global.pb.OnionService.Connect:output_type -> quilibrium.node.global.pb.ReceiveMessage - 65, // 97: quilibrium.node.global.pb.MixnetService.PutMessage:output_type -> quilibrium.node.global.pb.PutMessageResponse - 90, // 98: quilibrium.node.global.pb.MixnetService.RoundStream:output_type -> quilibrium.node.application.pb.Message - 38, // 99: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:output_type -> quilibrium.node.global.pb.GetKeyRegistryResponse - 40, // 100: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:output_type -> quilibrium.node.global.pb.GetKeyRegistryByProverResponse - 42, // 101: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:output_type -> quilibrium.node.global.pb.PutIdentityKeyResponse - 44, // 102: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:output_type -> quilibrium.node.global.pb.PutProvingKeyResponse - 46, // 103: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:output_type -> quilibrium.node.global.pb.PutCrossSignatureResponse - 48, // 104: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:output_type -> quilibrium.node.global.pb.PutSignedKeyResponse - 50, // 105: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:output_type -> quilibrium.node.global.pb.GetIdentityKeyResponse - 52, // 106: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:output_type -> quilibrium.node.global.pb.GetProvingKeyResponse - 54, // 107: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:output_type -> quilibrium.node.global.pb.GetSignedKeyResponse - 56, // 108: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:output_type -> quilibrium.node.global.pb.GetSignedKeysByParentResponse - 58, // 109: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:output_type -> quilibrium.node.global.pb.RangeProvingKeysResponse - 60, // 110: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:output_type -> quilibrium.node.global.pb.RangeIdentityKeysResponse - 62, // 111: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:output_type -> quilibrium.node.global.pb.RangeSignedKeysResponse - 96, // 112: quilibrium.node.global.pb.DispatchService.PutInboxMessage:output_type -> google.protobuf.Empty - 97, // 113: quilibrium.node.global.pb.DispatchService.GetInboxMessages:output_type -> quilibrium.node.channel.pb.InboxMessageResponse - 96, // 114: quilibrium.node.global.pb.DispatchService.PutHub:output_type -> google.protobuf.Empty - 98, // 115: quilibrium.node.global.pb.DispatchService.GetHub:output_type -> quilibrium.node.channel.pb.HubResponse - 99, // 116: quilibrium.node.global.pb.DispatchService.Sync:output_type -> quilibrium.node.channel.pb.DispatchSyncResponse - 90, // [90:117] is the sub-list for method output_type - 63, // [63:90] is the sub-list for method input_type - 63, // [63:63] is the sub-list for extension type_name - 63, // [63:63] is the sub-list for extension extendee - 0, // [0:63] is the sub-list for field type_name + 67, // 0: quilibrium.node.global.pb.LegacyProverRequest.public_key_signatures_ed448:type_name -> quilibrium.node.keys.pb.Ed448Signature + 68, // 1: quilibrium.node.global.pb.ProverJoin.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 1, // 2: quilibrium.node.global.pb.ProverJoin.merge_targets:type_name -> quilibrium.node.global.pb.SeniorityMerge + 69, // 3: quilibrium.node.global.pb.ProverLeave.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 69, // 4: quilibrium.node.global.pb.ProverPause.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 69, // 5: quilibrium.node.global.pb.ProverResume.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 69, // 6: quilibrium.node.global.pb.ProverConfirm.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 69, // 7: quilibrium.node.global.pb.ProverUpdate.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 70, // 8: quilibrium.node.global.pb.ProverKick.traversal_proof:type_name -> quilibrium.node.application.pb.TraversalProof + 69, // 9: quilibrium.node.global.pb.ProverReject.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 2, // 10: quilibrium.node.global.pb.MessageRequest.join:type_name -> quilibrium.node.global.pb.ProverJoin + 3, // 11: quilibrium.node.global.pb.MessageRequest.leave:type_name -> quilibrium.node.global.pb.ProverLeave + 4, // 12: quilibrium.node.global.pb.MessageRequest.pause:type_name -> quilibrium.node.global.pb.ProverPause + 5, // 13: quilibrium.node.global.pb.MessageRequest.resume:type_name -> quilibrium.node.global.pb.ProverResume + 6, // 14: quilibrium.node.global.pb.MessageRequest.confirm:type_name -> quilibrium.node.global.pb.ProverConfirm + 9, // 15: quilibrium.node.global.pb.MessageRequest.reject:type_name -> quilibrium.node.global.pb.ProverReject + 8, // 16: quilibrium.node.global.pb.MessageRequest.kick:type_name -> quilibrium.node.global.pb.ProverKick + 7, // 17: quilibrium.node.global.pb.MessageRequest.update:type_name -> quilibrium.node.global.pb.ProverUpdate + 71, // 18: quilibrium.node.global.pb.MessageRequest.token_deploy:type_name -> quilibrium.node.token.pb.TokenDeploy + 72, // 19: quilibrium.node.global.pb.MessageRequest.token_update:type_name -> quilibrium.node.token.pb.TokenUpdate + 73, // 20: quilibrium.node.global.pb.MessageRequest.transaction:type_name -> quilibrium.node.token.pb.Transaction + 74, // 21: quilibrium.node.global.pb.MessageRequest.pending_transaction:type_name -> quilibrium.node.token.pb.PendingTransaction + 75, // 22: quilibrium.node.global.pb.MessageRequest.mint_transaction:type_name -> quilibrium.node.token.pb.MintTransaction + 76, // 23: quilibrium.node.global.pb.MessageRequest.hypergraph_deploy:type_name -> quilibrium.node.hypergraph.pb.HypergraphDeploy + 77, // 24: quilibrium.node.global.pb.MessageRequest.hypergraph_update:type_name -> quilibrium.node.hypergraph.pb.HypergraphUpdate + 78, // 25: quilibrium.node.global.pb.MessageRequest.vertex_add:type_name -> quilibrium.node.hypergraph.pb.VertexAdd + 79, // 26: quilibrium.node.global.pb.MessageRequest.vertex_remove:type_name -> quilibrium.node.hypergraph.pb.VertexRemove + 80, // 27: quilibrium.node.global.pb.MessageRequest.hyperedge_add:type_name -> quilibrium.node.hypergraph.pb.HyperedgeAdd + 81, // 28: quilibrium.node.global.pb.MessageRequest.hyperedge_remove:type_name -> quilibrium.node.hypergraph.pb.HyperedgeRemove + 82, // 29: quilibrium.node.global.pb.MessageRequest.compute_deploy:type_name -> quilibrium.node.compute.pb.ComputeDeploy + 83, // 30: quilibrium.node.global.pb.MessageRequest.compute_update:type_name -> quilibrium.node.compute.pb.ComputeUpdate + 84, // 31: quilibrium.node.global.pb.MessageRequest.code_deploy:type_name -> quilibrium.node.compute.pb.CodeDeployment + 85, // 32: quilibrium.node.global.pb.MessageRequest.code_execute:type_name -> quilibrium.node.compute.pb.CodeExecute + 86, // 33: quilibrium.node.global.pb.MessageRequest.code_finalize:type_name -> quilibrium.node.compute.pb.CodeFinalize + 13, // 34: quilibrium.node.global.pb.MessageRequest.shard:type_name -> quilibrium.node.global.pb.FrameHeader + 10, // 35: quilibrium.node.global.pb.MessageBundle.requests:type_name -> quilibrium.node.global.pb.MessageRequest + 87, // 36: quilibrium.node.global.pb.GlobalFrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 87, // 37: quilibrium.node.global.pb.FrameHeader.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 69, // 38: quilibrium.node.global.pb.ProverLivenessCheck.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 69, // 39: quilibrium.node.global.pb.ProposalVote.public_key_signature_bls48581:type_name -> quilibrium.node.keys.pb.BLS48581AddressedSignature + 87, // 40: quilibrium.node.global.pb.QuorumCertificate.aggregate_signature:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 16, // 41: quilibrium.node.global.pb.TimeoutCertificate.latest_quorum_certificate:type_name -> quilibrium.node.global.pb.QuorumCertificate + 87, // 42: quilibrium.node.global.pb.TimeoutCertificate.aggregate_signature:type_name -> quilibrium.node.keys.pb.BLS48581AggregateSignature + 12, // 43: quilibrium.node.global.pb.GlobalFrame.header:type_name -> quilibrium.node.global.pb.GlobalFrameHeader + 11, // 44: quilibrium.node.global.pb.GlobalFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle + 13, // 45: quilibrium.node.global.pb.AppShardFrame.header:type_name -> quilibrium.node.global.pb.FrameHeader + 11, // 46: quilibrium.node.global.pb.AppShardFrame.requests:type_name -> quilibrium.node.global.pb.MessageBundle + 18, // 47: quilibrium.node.global.pb.GlobalFrameResponse.frame:type_name -> quilibrium.node.global.pb.GlobalFrame + 19, // 48: quilibrium.node.global.pb.AppShardFrameResponse.frame:type_name -> quilibrium.node.global.pb.AppShardFrame + 26, // 49: quilibrium.node.global.pb.GetAppShardsResponse.info:type_name -> quilibrium.node.global.pb.AppShardInfo + 31, // 50: quilibrium.node.global.pb.GetLockedAddressesResponse.transactions:type_name -> quilibrium.node.global.pb.LockedTransaction + 34, // 51: quilibrium.node.global.pb.GlobalGetWorkerInfoResponse.workers:type_name -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponseItem + 88, // 52: quilibrium.node.global.pb.GetKeyRegistryResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry + 88, // 53: quilibrium.node.global.pb.GetKeyRegistryByProverResponse.registry:type_name -> quilibrium.node.keys.pb.KeyRegistry + 89, // 54: quilibrium.node.global.pb.PutIdentityKeyRequest.identity_key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey + 68, // 55: quilibrium.node.global.pb.PutProvingKeyRequest.proving_key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 90, // 56: quilibrium.node.global.pb.PutSignedKeyRequest.key:type_name -> quilibrium.node.keys.pb.SignedX448Key + 89, // 57: quilibrium.node.global.pb.GetIdentityKeyResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey + 68, // 58: quilibrium.node.global.pb.GetProvingKeyResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 90, // 59: quilibrium.node.global.pb.GetSignedKeyResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key + 90, // 60: quilibrium.node.global.pb.GetSignedKeysByParentResponse.keys:type_name -> quilibrium.node.keys.pb.SignedX448Key + 68, // 61: quilibrium.node.global.pb.RangeProvingKeysResponse.key:type_name -> quilibrium.node.keys.pb.BLS48581SignatureWithProofOfPossession + 89, // 62: quilibrium.node.global.pb.RangeIdentityKeysResponse.key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey + 90, // 63: quilibrium.node.global.pb.RangeSignedKeysResponse.key:type_name -> quilibrium.node.keys.pb.SignedX448Key + 64, // 64: quilibrium.node.global.pb.PutMessageRequest.message_shards:type_name -> quilibrium.node.global.pb.MessageKeyShard + 21, // 65: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:input_type -> quilibrium.node.global.pb.GetGlobalFrameRequest + 25, // 66: quilibrium.node.global.pb.GlobalService.GetAppShards:input_type -> quilibrium.node.global.pb.GetAppShardsRequest + 28, // 67: quilibrium.node.global.pb.GlobalService.GetGlobalShards:input_type -> quilibrium.node.global.pb.GetGlobalShardsRequest + 30, // 68: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:input_type -> quilibrium.node.global.pb.GetLockedAddressesRequest + 33, // 69: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:input_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoRequest + 23, // 70: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:input_type -> quilibrium.node.global.pb.GetAppShardFrameRequest + 36, // 71: quilibrium.node.global.pb.OnionService.Connect:input_type -> quilibrium.node.global.pb.SendMessage + 65, // 72: quilibrium.node.global.pb.MixnetService.PutMessage:input_type -> quilibrium.node.global.pb.PutMessageRequest + 91, // 73: quilibrium.node.global.pb.MixnetService.RoundStream:input_type -> quilibrium.node.application.pb.Message + 38, // 74: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:input_type -> quilibrium.node.global.pb.GetKeyRegistryRequest + 40, // 75: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:input_type -> quilibrium.node.global.pb.GetKeyRegistryByProverRequest + 42, // 76: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:input_type -> quilibrium.node.global.pb.PutIdentityKeyRequest + 44, // 77: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:input_type -> quilibrium.node.global.pb.PutProvingKeyRequest + 46, // 78: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:input_type -> quilibrium.node.global.pb.PutCrossSignatureRequest + 48, // 79: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:input_type -> quilibrium.node.global.pb.PutSignedKeyRequest + 50, // 80: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:input_type -> quilibrium.node.global.pb.GetIdentityKeyRequest + 52, // 81: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:input_type -> quilibrium.node.global.pb.GetProvingKeyRequest + 54, // 82: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:input_type -> quilibrium.node.global.pb.GetSignedKeyRequest + 56, // 83: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:input_type -> quilibrium.node.global.pb.GetSignedKeysByParentRequest + 58, // 84: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:input_type -> quilibrium.node.global.pb.RangeProvingKeysRequest + 60, // 85: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:input_type -> quilibrium.node.global.pb.RangeIdentityKeysRequest + 62, // 86: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:input_type -> quilibrium.node.global.pb.RangeSignedKeysRequest + 92, // 87: quilibrium.node.global.pb.DispatchService.PutInboxMessage:input_type -> quilibrium.node.channel.pb.InboxMessagePut + 93, // 88: quilibrium.node.global.pb.DispatchService.GetInboxMessages:input_type -> quilibrium.node.channel.pb.InboxMessageRequest + 94, // 89: quilibrium.node.global.pb.DispatchService.PutHub:input_type -> quilibrium.node.channel.pb.HubPut + 95, // 90: quilibrium.node.global.pb.DispatchService.GetHub:input_type -> quilibrium.node.channel.pb.HubRequest + 96, // 91: quilibrium.node.global.pb.DispatchService.Sync:input_type -> quilibrium.node.channel.pb.DispatchSyncRequest + 22, // 92: quilibrium.node.global.pb.GlobalService.GetGlobalFrame:output_type -> quilibrium.node.global.pb.GlobalFrameResponse + 27, // 93: quilibrium.node.global.pb.GlobalService.GetAppShards:output_type -> quilibrium.node.global.pb.GetAppShardsResponse + 29, // 94: quilibrium.node.global.pb.GlobalService.GetGlobalShards:output_type -> quilibrium.node.global.pb.GetGlobalShardsResponse + 32, // 95: quilibrium.node.global.pb.GlobalService.GetLockedAddresses:output_type -> quilibrium.node.global.pb.GetLockedAddressesResponse + 35, // 96: quilibrium.node.global.pb.GlobalService.GetWorkerInfo:output_type -> quilibrium.node.global.pb.GlobalGetWorkerInfoResponse + 24, // 97: quilibrium.node.global.pb.AppShardService.GetAppShardFrame:output_type -> quilibrium.node.global.pb.AppShardFrameResponse + 37, // 98: quilibrium.node.global.pb.OnionService.Connect:output_type -> quilibrium.node.global.pb.ReceiveMessage + 66, // 99: quilibrium.node.global.pb.MixnetService.PutMessage:output_type -> quilibrium.node.global.pb.PutMessageResponse + 91, // 100: quilibrium.node.global.pb.MixnetService.RoundStream:output_type -> quilibrium.node.application.pb.Message + 39, // 101: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistry:output_type -> quilibrium.node.global.pb.GetKeyRegistryResponse + 41, // 102: quilibrium.node.global.pb.KeyRegistryService.GetKeyRegistryByProver:output_type -> quilibrium.node.global.pb.GetKeyRegistryByProverResponse + 43, // 103: quilibrium.node.global.pb.KeyRegistryService.PutIdentityKey:output_type -> quilibrium.node.global.pb.PutIdentityKeyResponse + 45, // 104: quilibrium.node.global.pb.KeyRegistryService.PutProvingKey:output_type -> quilibrium.node.global.pb.PutProvingKeyResponse + 47, // 105: quilibrium.node.global.pb.KeyRegistryService.PutCrossSignature:output_type -> quilibrium.node.global.pb.PutCrossSignatureResponse + 49, // 106: quilibrium.node.global.pb.KeyRegistryService.PutSignedKey:output_type -> quilibrium.node.global.pb.PutSignedKeyResponse + 51, // 107: quilibrium.node.global.pb.KeyRegistryService.GetIdentityKey:output_type -> quilibrium.node.global.pb.GetIdentityKeyResponse + 53, // 108: quilibrium.node.global.pb.KeyRegistryService.GetProvingKey:output_type -> quilibrium.node.global.pb.GetProvingKeyResponse + 55, // 109: quilibrium.node.global.pb.KeyRegistryService.GetSignedKey:output_type -> quilibrium.node.global.pb.GetSignedKeyResponse + 57, // 110: quilibrium.node.global.pb.KeyRegistryService.GetSignedKeysByParent:output_type -> quilibrium.node.global.pb.GetSignedKeysByParentResponse + 59, // 111: quilibrium.node.global.pb.KeyRegistryService.RangeProvingKeys:output_type -> quilibrium.node.global.pb.RangeProvingKeysResponse + 61, // 112: quilibrium.node.global.pb.KeyRegistryService.RangeIdentityKeys:output_type -> quilibrium.node.global.pb.RangeIdentityKeysResponse + 63, // 113: quilibrium.node.global.pb.KeyRegistryService.RangeSignedKeys:output_type -> quilibrium.node.global.pb.RangeSignedKeysResponse + 97, // 114: quilibrium.node.global.pb.DispatchService.PutInboxMessage:output_type -> google.protobuf.Empty + 98, // 115: quilibrium.node.global.pb.DispatchService.GetInboxMessages:output_type -> quilibrium.node.channel.pb.InboxMessageResponse + 97, // 116: quilibrium.node.global.pb.DispatchService.PutHub:output_type -> google.protobuf.Empty + 99, // 117: quilibrium.node.global.pb.DispatchService.GetHub:output_type -> quilibrium.node.channel.pb.HubResponse + 100, // 118: quilibrium.node.global.pb.DispatchService.Sync:output_type -> quilibrium.node.channel.pb.DispatchSyncResponse + 92, // [92:119] is the sub-list for method output_type + 65, // [65:92] is the sub-list for method input_type + 65, // [65:65] is the sub-list for extension type_name + 65, // [65:65] is the sub-list for extension extendee + 0, // [0:65] is the sub-list for field type_name } func init() { file_global_proto_init() } @@ -5663,7 +5790,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FrameVote); i { + switch v := v.(*ProposalVote); i { case 0: return &v.state case 1: @@ -5675,7 +5802,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FrameConfirmation); i { + switch v := v.(*QuorumCertificate); i { case 0: return &v.state case 1: @@ -5687,7 +5814,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalFrame); i { + switch v := v.(*TimeoutCertificate); i { case 0: return &v.state case 1: @@ -5699,7 +5826,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppShardFrame); i { + switch v := v.(*GlobalFrame); i { case 0: return &v.state case 1: @@ -5711,7 +5838,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalAlert); i { + switch v := v.(*AppShardFrame); i { case 0: return &v.state case 1: @@ -5723,7 +5850,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGlobalFrameRequest); i { + switch v := v.(*GlobalAlert); i { case 0: return &v.state case 1: @@ -5735,7 +5862,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalFrameResponse); i { + switch v := v.(*GetGlobalFrameRequest); i { case 0: return &v.state case 1: @@ -5747,7 +5874,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAppShardFrameRequest); i { + switch v := v.(*GlobalFrameResponse); i { case 0: return &v.state case 1: @@ -5759,7 +5886,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppShardFrameResponse); i { + switch v := v.(*GetAppShardFrameRequest); i { case 0: return &v.state case 1: @@ -5771,7 +5898,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAppShardsRequest); i { + switch v := v.(*AppShardFrameResponse); i { case 0: return &v.state case 1: @@ -5783,7 +5910,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppShardInfo); i { + switch v := v.(*GetAppShardsRequest); i { case 0: return &v.state case 1: @@ -5795,7 +5922,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAppShardsResponse); i { + switch v := v.(*AppShardInfo); i { case 0: return &v.state case 1: @@ -5807,7 +5934,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGlobalShardsRequest); i { + switch v := v.(*GetAppShardsResponse); i { case 0: return &v.state case 1: @@ -5819,7 +5946,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetGlobalShardsResponse); i { + switch v := v.(*GetGlobalShardsRequest); i { case 0: return &v.state case 1: @@ -5831,7 +5958,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLockedAddressesRequest); i { + switch v := v.(*GetGlobalShardsResponse); i { case 0: return &v.state case 1: @@ -5843,7 +5970,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LockedTransaction); i { + switch v := v.(*GetLockedAddressesRequest); i { case 0: return &v.state case 1: @@ -5855,7 +5982,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLockedAddressesResponse); i { + switch v := v.(*LockedTransaction); i { case 0: return &v.state case 1: @@ -5867,7 +5994,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalGetWorkerInfoRequest); i { + switch v := v.(*GetLockedAddressesResponse); i { case 0: return &v.state case 1: @@ -5879,7 +6006,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalGetWorkerInfoResponseItem); i { + switch v := v.(*GlobalGetWorkerInfoRequest); i { case 0: return &v.state case 1: @@ -5891,7 +6018,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GlobalGetWorkerInfoResponse); i { + switch v := v.(*GlobalGetWorkerInfoResponseItem); i { case 0: return &v.state case 1: @@ -5903,7 +6030,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendMessage); i { + switch v := v.(*GlobalGetWorkerInfoResponse); i { case 0: return &v.state case 1: @@ -5915,7 +6042,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReceiveMessage); i { + switch v := v.(*SendMessage); i { case 0: return &v.state case 1: @@ -5927,7 +6054,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryRequest); i { + switch v := v.(*ReceiveMessage); i { case 0: return &v.state case 1: @@ -5939,7 +6066,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryResponse); i { + switch v := v.(*GetKeyRegistryRequest); i { case 0: return &v.state case 1: @@ -5951,7 +6078,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryByProverRequest); i { + switch v := v.(*GetKeyRegistryResponse); i { case 0: return &v.state case 1: @@ -5963,7 +6090,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyRegistryByProverResponse); i { + switch v := v.(*GetKeyRegistryByProverRequest); i { case 0: return &v.state case 1: @@ -5975,7 +6102,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutIdentityKeyRequest); i { + switch v := v.(*GetKeyRegistryByProverResponse); i { case 0: return &v.state case 1: @@ -5987,7 +6114,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutIdentityKeyResponse); i { + switch v := v.(*PutIdentityKeyRequest); i { case 0: return &v.state case 1: @@ -5999,7 +6126,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutProvingKeyRequest); i { + switch v := v.(*PutIdentityKeyResponse); i { case 0: return &v.state case 1: @@ -6011,7 +6138,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutProvingKeyResponse); i { + switch v := v.(*PutProvingKeyRequest); i { case 0: return &v.state case 1: @@ -6023,7 +6150,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutCrossSignatureRequest); i { + switch v := v.(*PutProvingKeyResponse); i { case 0: return &v.state case 1: @@ -6035,7 +6162,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutCrossSignatureResponse); i { + switch v := v.(*PutCrossSignatureRequest); i { case 0: return &v.state case 1: @@ -6047,7 +6174,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutSignedKeyRequest); i { + switch v := v.(*PutCrossSignatureResponse); i { case 0: return &v.state case 1: @@ -6059,7 +6186,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutSignedKeyResponse); i { + switch v := v.(*PutSignedKeyRequest); i { case 0: return &v.state case 1: @@ -6071,7 +6198,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetIdentityKeyRequest); i { + switch v := v.(*PutSignedKeyResponse); i { case 0: return &v.state case 1: @@ -6083,7 +6210,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetIdentityKeyResponse); i { + switch v := v.(*GetIdentityKeyRequest); i { case 0: return &v.state case 1: @@ -6095,7 +6222,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvingKeyRequest); i { + switch v := v.(*GetIdentityKeyResponse); i { case 0: return &v.state case 1: @@ -6107,7 +6234,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvingKeyResponse); i { + switch v := v.(*GetProvingKeyRequest); i { case 0: return &v.state case 1: @@ -6119,7 +6246,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeyRequest); i { + switch v := v.(*GetProvingKeyResponse); i { case 0: return &v.state case 1: @@ -6131,7 +6258,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeyResponse); i { + switch v := v.(*GetSignedKeyRequest); i { case 0: return &v.state case 1: @@ -6143,7 +6270,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeysByParentRequest); i { + switch v := v.(*GetSignedKeyResponse); i { case 0: return &v.state case 1: @@ -6155,7 +6282,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSignedKeysByParentResponse); i { + switch v := v.(*GetSignedKeysByParentRequest); i { case 0: return &v.state case 1: @@ -6167,7 +6294,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeProvingKeysRequest); i { + switch v := v.(*GetSignedKeysByParentResponse); i { case 0: return &v.state case 1: @@ -6179,7 +6306,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeProvingKeysResponse); i { + switch v := v.(*RangeProvingKeysRequest); i { case 0: return &v.state case 1: @@ -6191,7 +6318,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeIdentityKeysRequest); i { + switch v := v.(*RangeProvingKeysResponse); i { case 0: return &v.state case 1: @@ -6203,7 +6330,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeIdentityKeysResponse); i { + switch v := v.(*RangeIdentityKeysRequest); i { case 0: return &v.state case 1: @@ -6215,7 +6342,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeSignedKeysRequest); i { + switch v := v.(*RangeIdentityKeysResponse); i { case 0: return &v.state case 1: @@ -6227,7 +6354,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RangeSignedKeysResponse); i { + switch v := v.(*RangeSignedKeysRequest); i { case 0: return &v.state case 1: @@ -6239,7 +6366,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageKeyShard); i { + switch v := v.(*RangeSignedKeysResponse); i { case 0: return &v.state case 1: @@ -6251,7 +6378,7 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PutMessageRequest); i { + switch v := v.(*MessageKeyShard); i { case 0: return &v.state case 1: @@ -6263,6 +6390,18 @@ func file_global_proto_init() { } } file_global_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutMessageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_global_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PutMessageResponse); i { case 0: return &v.state @@ -6308,7 +6447,7 @@ func file_global_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_global_proto_rawDesc, NumEnums: 0, - NumMessages: 66, + NumMessages: 67, NumExtensions: 0, NumServices: 6, }, diff --git a/protobufs/global.proto b/protobufs/global.proto index bb98e38..fb89721 100644 --- a/protobufs/global.proto +++ b/protobufs/global.proto @@ -195,40 +195,57 @@ message FrameHeader { message ProverLivenessCheck { // The filter for the prover's commitment in the trie bytes filter = 1; + // The rank of the consensus clique + uint64 rank = 2; // The frame number for which this liveness check is being sent - uint64 frame_number = 2; + uint64 frame_number = 3; // The timestamp when the liveness check was created - int64 timestamp = 3; + int64 timestamp = 4; // The hash of the shard commitments and prover root - bytes commitment_hash = 4; + bytes commitment_hash = 5; // The BLS signature with the prover's address - quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 5; + quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 6; } -message FrameVote { +message ProposalVote { // The filter for the prover's commitment in the trie bytes filter = 1; - // The frame number being voted on - uint64 frame_number = 2; - // The proposer of the frame - bytes proposer = 3; - // Whether the voter approves the frame - bool approve = 4; + // The rank of the consensus clique + uint64 rank = 2; + // The frame number for which this proposal applies + uint64 frame_number = 3; + // The selector being voted for + bytes selector = 4; // The timestamp when the vote was created int64 timestamp = 5; // The BLS signature with the voter's address quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 6; } -message FrameConfirmation { +message QuorumCertificate { // The filter for the prover's commitment in the trie bytes filter = 1; - // The frame number that was confirmed - uint64 frame_number = 2; + // The rank of the consensus clique + uint64 rank = 2; + // The frame number for which this certificate applies + uint64 frame_number = 3; // The selector (hash) of the confirmed frame - bytes selector = 3; + bytes selector = 4; // The timestamp when the vote was created - int64 timestamp = 4; + int64 timestamp = 5; + // The aggregated BLS signature from all voters + quilibrium.node.keys.pb.BLS48581AggregateSignature aggregate_signature = 6; +} + +message TimeoutCertificate { + // The filter for the prover's commitment in the trie + bytes filter = 1; + // The rank of the consensus clique + uint64 rank = 2; + // The latest ranks in signer order + repeated uint64 latest_ranks = 3; + // The latest quorum certificate from all timeouts + QuorumCertificate latest_quorum_certificate = 4; // The aggregated BLS signature from all voters quilibrium.node.keys.pb.BLS48581AggregateSignature aggregate_signature = 5; } diff --git a/protobufs/global_test.go b/protobufs/global_test.go index 1957959..7d2b6b1 100644 --- a/protobufs/global_test.go +++ b/protobufs/global_test.go @@ -732,35 +732,23 @@ func TestProverLivenessCheck_Serialization(t *testing.T) { } } -func TestFrameVote_Serialization(t *testing.T) { +func TestProposalVote_Serialization(t *testing.T) { tests := []struct { name string - vote *FrameVote + vote *ProposalVote }{ { name: "complete frame vote approve", - vote: &FrameVote{ + vote: &ProposalVote{ FrameNumber: 77777, - Proposer: make([]byte, 32), - Approve: true, + Rank: 77777, + Selector: make([]byte, 32), PublicKeySignatureBls48581: &BLS48581AddressedSignature{ Signature: make([]byte, 74), Address: make([]byte, 32), }, }, }, - { - name: "frame vote reject", - vote: &FrameVote{ - FrameNumber: 88888, - Proposer: append([]byte{0xFF}, make([]byte, 31)...), - Approve: false, - PublicKeySignatureBls48581: &BLS48581AddressedSignature{ - Signature: append([]byte{0xAA}, make([]byte, 73)...), - Address: append([]byte{0xCC}, make([]byte, 31)...), - }, - }, - }, } for _, tt := range tests { @@ -769,13 +757,13 @@ func TestFrameVote_Serialization(t *testing.T) { require.NoError(t, err) require.NotNil(t, data) - vote2 := &FrameVote{} + vote2 := &ProposalVote{} err = vote2.FromCanonicalBytes(data) require.NoError(t, err) assert.Equal(t, tt.vote.FrameNumber, vote2.FrameNumber) - assert.Equal(t, tt.vote.Proposer, vote2.Proposer) - assert.Equal(t, tt.vote.Approve, vote2.Approve) + assert.Equal(t, tt.vote.Rank, vote2.Rank) + assert.Equal(t, tt.vote.Selector, vote2.Selector) assert.NotNil(t, vote2.PublicKeySignatureBls48581) assert.Equal(t, tt.vote.PublicKeySignatureBls48581.Signature, vote2.PublicKeySignatureBls48581.Signature) assert.Equal(t, tt.vote.PublicKeySignatureBls48581.Address, vote2.PublicKeySignatureBls48581.Address) @@ -783,15 +771,16 @@ func TestFrameVote_Serialization(t *testing.T) { } } -func TestFrameConfirmation_Serialization(t *testing.T) { +func TestQuorumCertificate_Serialization(t *testing.T) { tests := []struct { name string - conf *FrameConfirmation + conf *QuorumCertificate }{ { - name: "complete frame confirmation", - conf: &FrameConfirmation{ + name: "complete confirmation", + conf: &QuorumCertificate{ FrameNumber: 12345, + Rank: 12345, Selector: make([]byte, 32), AggregateSignature: &BLS48581AggregateSignature{ Signature: make([]byte, 74), @@ -803,9 +792,10 @@ func TestFrameConfirmation_Serialization(t *testing.T) { }, }, { - name: "minimal frame confirmation", - conf: &FrameConfirmation{ + name: "minimal confirmation", + conf: &QuorumCertificate{ FrameNumber: 0, + Rank: 0, Selector: []byte{}, AggregateSignature: nil, }, @@ -818,11 +808,12 @@ func TestFrameConfirmation_Serialization(t *testing.T) { require.NoError(t, err) require.NotNil(t, data) - conf2 := &FrameConfirmation{} + conf2 := &QuorumCertificate{} err = conf2.FromCanonicalBytes(data) require.NoError(t, err) assert.Equal(t, tt.conf.FrameNumber, conf2.FrameNumber) + assert.Equal(t, tt.conf.Rank, conf2.Rank) assert.Equal(t, tt.conf.Selector, conf2.Selector) if tt.conf.AggregateSignature != nil { assert.NotNil(t, conf2.AggregateSignature) diff --git a/protobufs/go.mod b/protobufs/go.mod index 9a46a97..f83756c 100644 --- a/protobufs/go.mod +++ b/protobufs/go.mod @@ -1,6 +1,6 @@ module source.quilibrium.com/quilibrium/monorepo/protobufs -go 1.23.0 +go 1.23.2 toolchain go1.23.4 @@ -14,6 +14,7 @@ require ( github.com/cloudflare/circl v1.6.1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 github.com/iden3/go-iden3-crypto v0.0.17 + github.com/libp2p/go-libp2p v0.0.0-00010101000000-000000000000 github.com/multiformats/go-multiaddr v0.16.1 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.10.0 @@ -24,26 +25,29 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/ipfs/go-cid v0.0.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.1 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/exp v0.0.0-20230725012225-302865e7556b // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/protobufs/go.sum b/protobufs/go.sum index af78912..f9c28cb 100644 --- a/protobufs/go.sum +++ b/protobufs/go.sum @@ -3,6 +3,10 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -17,37 +21,32 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5uk github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY= github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= -github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= -github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= +github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -72,23 +71,16 @@ go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= -golang.org/x/exp v0.0.0-20230725012225-302865e7556b h1:tK7yjGqVRzYdXsBcfD2MLhFAhHfDgGLm2rY1ub7FA9k= -golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= +golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= @@ -102,5 +94,5 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=