* wip: conversion of hotstuff from flow into Q-oriented model

* bulk of tests

* remaining non-integration tests

* add integration test, adjust log interface, small tweaks

* further adjustments, restore full pacemaker shape

* add component lifecycle management+supervisor

* further refinements

* resolve timeout hanging

* mostly finalized state for consensus

* bulk of engine swap out

* lifecycle-ify most types

* wiring nearly complete, missing needed hooks for proposals

* plugged in, vetting message validation paths

* global consensus, plugged in and verified

* app shard now wired in too

* do not decode empty keys.yml (#456)

* remove obsolete engine.maxFrames config parameter (#454)

* default to Info log level unless debug is enabled (#453)

* respect config's "logging" section params, remove obsolete single-file logging (#452)

* Trivial code cleanup aiming to reduce Go compiler warnings (#451)

* simplify range traversal

* simplify channel read for single select case

* delete rand.Seed() deprecated in Go 1.20 and no-op as of Go 1.24

* simplify range traversal

* simplify channel read for single select case

* remove redundant type from array

* simplify range traversal

* simplify channel read for single select case

* RC slate

* finalize 2.1.0.5

* Update comments in StrictMonotonicCounter

Fix comment formatting and clarify description.

---------

Co-authored-by: Black Swan <3999712+blacks1ne@users.noreply.github.com>
Cassandra Heart 2025-11-11 05:00:17 -06:00 committed by GitHub
parent 4df761de20
commit c797d482f9
304 changed files with 45513 additions and 10727 deletions

View File

@@ -47,7 +47,6 @@ type Config struct {
 	Logger              *LogConfig `yaml:"logger"`
 	ListenGRPCMultiaddr string     `yaml:"listenGrpcMultiaddr"`
 	ListenRestMultiaddr string     `yaml:"listenRESTMultiaddr"`
-	LogFile             string     `yaml:"logFile"`
 }
 
 // WithDefaults returns a copy of the config with default values filled in.
@@ -293,7 +292,6 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
 			ProvingKeyId:         "default-proving-key",
 			Filter:               "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
 			GenesisSeed:          genesisSeed,
-			MaxFrames:            -1,
 			PendingCommitWorkers: 4,
 		},
 	}

View File

@@ -90,7 +90,6 @@ type EngineConfig struct {
 	ProvingKeyId         string `yaml:"provingKeyId"`
 	Filter               string `yaml:"filter"`
 	GenesisSeed          string `yaml:"genesisSeed"`
-	MaxFrames            int64  `yaml:"maxFrames"`
 	PendingCommitWorkers int64  `yaml:"pendingCommitWorkers"`
 	MinimumPeersRequired int    `yaml:"minimumPeersRequired"`
 	StatsMultiaddr       string `yaml:"statsMultiaddr"`

View File

@@ -21,18 +21,15 @@ func (c *Config) CreateLogger(coreId uint, debug bool) (
 	io.Closer,
 	error,
 ) {
-	filename := c.LogFile
-	if filename != "" || c.Logger != nil {
-		dir := ""
-		if c.Logger != nil {
-			dir = c.Logger.Path
-		}
+	if c.Logger != nil {
 		logger, closer, err := logging.NewRotatingFileLogger(
 			debug,
 			coreId,
-			dir,
-			filename,
+			c.Logger.Path,
+			c.Logger.MaxSize,
+			c.Logger.MaxBackups,
+			c.Logger.MaxAge,
+			c.Logger.Compress,
 		)
 		return logger, closer, errors.Wrap(err, "create logger")
 	}

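For reference, the rotating-logger fields consumed above suggest a config shape roughly like the sketch below. The YAML key names and units are assumptions inferred from the LogConfig fields this diff uses (Path, MaxSize, MaxBackups, MaxAge, Compress); they are not confirmed by this change.

```yaml
# Hypothetical "logger" section; key names and units are assumptions.
logger:
  path: /var/log/quilibrium   # directory for rotating log files
  maxSize: 100                # size per file before rotation (commonly MB)
  maxBackups: 10              # number of rotated files to keep
  maxAge: 30                  # retention for rotated files (commonly days)
  compress: true              # compress rotated files
```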
View File

@@ -43,9 +43,9 @@ func FormatVersion(version []byte) string {
 }
 
 func GetPatchNumber() byte {
-	return 0x04
+	return 0x05
 }
 
 func GetRCNumber() byte {
-	return 0x06
+	return 0x45
 }

consensus/.mockery.yaml (new file, 18 lines)
View File

@@ -0,0 +1,18 @@
dir: "{{.InterfaceDir}}/mock"
outpkg: "mock"
filename: "{{.InterfaceName | snakecase}}.go"
mockname: "{{.InterfaceName}}"
all: True
with-expecter: False
include-auto-generated: False
disable-func-mocks: True
fail-on-missing: True
disable-version-string: True
resolve-type-alias: False
packages:
  source.quilibrium.com/quilibrium/monorepo/consensus:
    config:
      dir: "mocks"
      outpkg: "mocks"

View File

@@ -1,300 +1,4 @@
 # Consensus State Machine
-
-A generic, extensible state machine implementation for building Byzantine Fault
-Tolerant (BFT) consensus protocols. This library provides a framework for
-implementing round-based consensus algorithms with cryptographic proofs.
-
-## Overview
-
-The state machine manages consensus engine state transitions through a
-well-defined set of states and events. It supports generic type parameters to
-allow different implementations of state data, votes, peer identities, and
-collected mutations.
-
-## Features
-
-- **Generic Implementation**: Supports custom types for state data, votes, peer
-  IDs, and collected data
-- **Byzantine Fault Tolerance**: Provides BFT consensus with < 1/3 byzantine
-  nodes, flexible to other probabilistic BFT implementations
-- **Round-based Consensus**: Implements a round-based state transition pattern
-- **Pluggable Providers**: Extensible through provider interfaces for different
-  consensus behaviors
-- **Event-driven Architecture**: State transitions triggered by events with
-  optional guard conditions
-- **Concurrent Safe**: Thread-safe implementation with proper mutex usage
-- **Timeout Support**: Configurable timeouts for each state with automatic
-  transitions
-- **Transition Listeners**: Observable state transitions for monitoring and
-  debugging
-
-## Core Concepts
-
-### States
-
-The state machine progresses through the following states:
-
-1. **StateStopped**: Initial state, engine is not running
-2. **StateStarting**: Engine is initializing
-3. **StateLoading**: Loading data and syncing with network
-4. **StateCollecting**: Collecting data/mutations for consensus round
-5. **StateLivenessCheck**: Checking peer liveness before proving
-6. **StateProving**: Generating cryptographic proof (leader only)
-7. **StatePublishing**: Publishing proposed state
-8. **StateVoting**: Voting on proposals
-9. **StateFinalizing**: Finalizing consensus round
-10. **StateVerifying**: Verifying and publishing results
-11. **StateStopping**: Engine is shutting down
-
-### Events
-
-Events trigger state transitions:
-
-- `EventStart`, `EventStop`: Lifecycle events
-- `EventSyncComplete`: Synchronization finished
-- `EventCollectionDone`: Mutation collection complete
-- `EventLivenessCheckReceived`: Peer liveness confirmed
-- `EventProverSignal`: Leader selection complete
-- `EventProofComplete`: Proof generation finished
-- `EventProposalReceived`: New proposal received
-- `EventVoteReceived`: Vote received
-- `EventQuorumReached`: Voting quorum achieved
-- `EventConfirmationReceived`: State confirmation received
-- And more...
-
-### Type Constraints
-
-All generic type parameters must implement the `Unique` interface:
-
-```go
-type Unique interface {
-	Identity() Identity // Returns a unique string identifier
-}
-```
-
-## Provider Interfaces
-
-### SyncProvider
-
-Handles initial state synchronization:
-
-```go
-type SyncProvider[StateT Unique] interface {
-	Synchronize(
-		existing *StateT,
-		ctx context.Context,
-	) (<-chan *StateT, <-chan error)
-}
-```
-
-### VotingProvider
-
-Manages the voting process:
-
-```go
-type VotingProvider[StateT Unique, VoteT Unique, PeerIDT Unique] interface {
-	SendProposal(proposal *StateT, ctx context.Context) error
-	DecideAndSendVote(
-		proposals map[Identity]*StateT,
-		ctx context.Context,
-	) (PeerIDT, *VoteT, error)
-	IsQuorum(votes map[Identity]*VoteT, ctx context.Context) (bool, error)
-	FinalizeVotes(
-		proposals map[Identity]*StateT,
-		votes map[Identity]*VoteT,
-		ctx context.Context,
-	) (*StateT, PeerIDT, error)
-	SendConfirmation(finalized *StateT, ctx context.Context) error
-}
-```
-
-### LeaderProvider
-
-Handles leader selection and proof generation:
-
-```go
-type LeaderProvider[
-	StateT Unique,
-	PeerIDT Unique,
-	CollectedT Unique,
-] interface {
-	GetNextLeaders(prior *StateT, ctx context.Context) ([]PeerIDT, error)
-	ProveNextState(
-		prior *StateT,
-		collected CollectedT,
-		ctx context.Context,
-	) (*StateT, error)
-}
-```
-
-### LivenessProvider
-
-Manages peer liveness checks:
-
-```go
-type LivenessProvider[
-	StateT Unique,
-	PeerIDT Unique,
-	CollectedT Unique,
-] interface {
-	Collect(ctx context.Context) (CollectedT, error)
-	SendLiveness(prior *StateT, collected CollectedT, ctx context.Context) error
-}
-```
-
-## Usage
-
-### Basic Setup
-
-```go
-// Define your types implementing Unique
-type MyState struct {
-	Round uint64
-	Hash  string
-}
-
-func (s MyState) Identity() string { return s.Hash }
-
-type MyVote struct {
-	Voter string
-	Value bool
-}
-
-func (v MyVote) Identity() string { return v.Voter }
-
-type MyPeerID struct {
-	ID string
-}
-
-func (p MyPeerID) Identity() string { return p.ID }
-
-type MyCollected struct {
-	Data []byte
-}
-
-func (c MyCollected) Identity() string { return string(c.Data) }
-
-// Implement providers
-syncProvider := &MySyncProvider{}
-votingProvider := &MyVotingProvider{}
-leaderProvider := &MyLeaderProvider{}
-livenessProvider := &MyLivenessProvider{}
-
-// Create state machine
-sm := consensus.NewStateMachine[MyState, MyVote, MyPeerID, MyCollected](
-	MyPeerID{ID: "node1"},               // This node's ID
-	&MyState{Round: 0, Hash: "genesis"}, // Initial state
-	true,                                // shouldEmitReceiveEventsOnSends
-	3,                                   // minimumProvers
-	syncProvider,
-	votingProvider,
-	leaderProvider,
-	livenessProvider,
-	nil,                                 // Optional trace logger
-)
-
-// Add transition listener
-sm.AddListener(&MyTransitionListener{})
-
-// Start the state machine
-if err := sm.Start(); err != nil {
-	log.Fatal(err)
-}
-
-// Receive external events
-sm.ReceiveProposal(peer, proposal)
-sm.ReceiveVote(voter, vote)
-sm.ReceiveLivenessCheck(peer, collected)
-sm.ReceiveConfirmation(peer, confirmation)
-
-// Stop the state machine
-if err := sm.Stop(); err != nil {
-	log.Fatal(err)
-}
-```
-
-### Implementing Providers
-
-See the `example/generic_consensus_example.go` for a complete working example
-with mock provider implementations.
-
-## State Flow
-
-The typical consensus flow:
-
-1. **Start** → **Starting** → **Loading**
-2. **Loading**: Synchronize with network
-3. **Collecting**: Gather mutations/changes
-4. **LivenessCheck**: Verify peer availability
-5. **Proving**: Leader generates proof
-6. **Publishing**: Leader publishes proposal
-7. **Voting**: All nodes vote on proposals
-8. **Finalizing**: Aggregate votes and determine outcome
-9. **Verifying**: Confirm and apply state changes
-10. Loop back to **Collecting** for next round
-
-## Configuration
-
-### Constructor Parameters
-
-- `id`: This node's peer ID
-- `initialState`: Starting state (can be nil)
-- `shouldEmitReceiveEventsOnSends`: Whether to emit receive events for own
-  messages
-- `minimumProvers`: Minimum number of active provers required
-- `traceLogger`: Optional logger for debugging state transitions
-
-### State Timeouts
-
-Each state can have a configured timeout that triggers an automatic transition:
-
-- **Starting**: 1 second → `EventInitComplete`
-- **Loading**: 10 minutes → `EventSyncComplete`
-- **Collecting**: 1 second → `EventCollectionDone`
-- **LivenessCheck**: 1 second → `EventLivenessTimeout`
-- **Proving**: 120 seconds → `EventPublishTimeout`
-- **Publishing**: 1 second → `EventPublishTimeout`
-- **Voting**: 10 seconds → `EventVotingTimeout`
-- **Finalizing**: 1 second → `EventAggregationDone`
-- **Verifying**: 1 second → `EventVerificationDone`
-- **Stopping**: 30 seconds → `EventCleanupComplete`
-
-## Thread Safety
-
-The state machine is thread-safe. All public methods properly handle concurrent
-access through mutex locks. State behaviors run in separate goroutines with
-proper cancellation support.
-
-## Error Handling
-
-- Provider errors are logged but don't crash the state machine
-- The state machine continues operating and may retry operations
-- Critical errors during state transitions are returned to callers
-- Use the `TraceLogger` interface for debugging
-
-## Best Practices
-
-1. **Message Isolation**: When implementing providers, always deep-copy data
-   before sending to prevent shared state between state machine and other
-   handlers
-2. **Nil Handling**: Provider implementations should handle nil prior states
-   gracefully
-3. **Context Usage**: Respect context cancellation in long-running operations
-4. **Quorum Size**: Set appropriate quorum size based on your network
-   (typically 2f+1 for f failures)
-5. **Timeout Configuration**: Adjust timeouts based on network conditions and
-   proof generation time
-
-## Example
-
-See `example/generic_consensus_example.go` for a complete working example
-demonstrating:
-
-- Mock provider implementations
-- Multi-node consensus network
-- Byzantine node behavior
-- Message passing between nodes
-- State transition monitoring
-
-## Testing
-
-The package includes comprehensive tests in `state_machine_test.go` covering:
-
-- State transitions
-- Event handling
-- Concurrent operations
-- Byzantine scenarios
-- Timeout behavior
+
+Consensus State Machine is being swapped out with a fork of the HotStuff implementation by Flow.
+This will be updated with appropriate license details when the fork work has finished.

View File

@@ -0,0 +1,154 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// A committee provides a subset of the protocol.State, which is restricted to
// exactly those nodes that participate in the current HotStuff instance: the
// state of all legitimate HotStuff participants for the specified rank.
// Legitimate HotStuff participants have NON-ZERO WEIGHT.
//
// For the purposes of validating votes, timeouts, quorum certificates, and
// timeout certificates we consider a committee which is static over the course
// of a rank. Although committee members may be ejected, or have their weight
// change during a rank, we ignore these changes. For these purposes we use
// the Replicas and *ByRank methods.
//
// When validating proposals, we take into account changes to the committee
// during the course of an rank. In particular, if a node is ejected, we will
// immediately reject all future proposals from that node. For these purposes we
// use the DynamicCommittee and *ByState methods.
// Replicas defines the consensus committee for the purposes of validating
// votes, timeouts, quorum certificates, and timeout certificates. Any consensus
// committee member who was authorized to contribute to consensus AT THE
// BEGINNING of the rank may produce valid votes and timeouts for the entire
// rank, even if they are later ejected. So for validating votes/timeouts we
// use *ByRank methods.
//
// Since the voter committee is considered static over a rank:
// - we can query identities by rank
// - we don't need the full state ancestry prior to validating messages
type Replicas interface {
// LeaderForRank returns the identity of the leader for a given rank.
// CAUTION: per liveness requirement of HotStuff, the leader must be
// fork-independent. Therefore, a node retains its proposer rank
// slots even if it is slashed. Its proposal is simply considered
// invalid, as it is not from a legitimate participant.
// Returns the following expected errors for invalid inputs:
// - model.ErrRankUnknown if the given rank is not known
LeaderForRank(rank uint64) (models.Identity, error)
// QuorumThresholdForRank returns the minimum total weight for a supermajority
// at the given rank. This weight threshold is computed using the total weight
// of the initial committee and is static over the course of a rank.
// Returns the following expected errors for invalid inputs:
// - model.ErrRankUnknown if the given rank is not known
QuorumThresholdForRank(rank uint64) (uint64, error)
// TimeoutThresholdForRank returns the minimum total weight of observed
// timeout states required to safely timeout for the given rank. This weight
// threshold is computed using the total weight of the initial committee and
// is static over the course of a rank.
// Returns the following expected errors for invalid inputs:
// - model.ErrRankUnknown if the given rank is not known
TimeoutThresholdForRank(rank uint64) (uint64, error)
// Self returns our own node identifier.
// TODO: ultimately, the node's own identity is necessary for signing.
// Ideally, we would move the method for checking whether an Identifier
// refers to this node to the signer. This would require some
// refactoring of EventHandler (postponed to later)
Self() models.Identity
// IdentitiesByRank returns a list of the legitimate HotStuff participants
// for the given rank.
// The returned list of HotStuff participants:
// - contains nodes that are allowed to submit votes or timeouts within the
// given rank (un-ejected, non-zero weight at the beginning of the rank)
// - is ordered in the canonical order
// - contains no duplicates.
//
// CAUTION: DO NOT use this method for validating state proposals.
//
// Returns the following expected errors for invalid inputs:
// - model.ErrRankUnknown if the given rank is not known
//
IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error)
// IdentityByRank returns the full Identity for the specified HotStuff
// participant. The node must be a legitimate HotStuff participant with
// NON-ZERO WEIGHT at the specified rank.
//
// ERROR conditions:
// - model.InvalidSignerError if participantID does NOT correspond to an
// authorized HotStuff participant at the specified rank.
//
// Returns the following expected errors for invalid inputs:
// - model.ErrRankUnknown if the given rank is not known
//
IdentityByRank(
rank uint64,
participantID models.Identity,
) (models.WeightedIdentity, error)
}
// DynamicCommittee extends Replicas to provide the consensus committee for the
// purposes of validating proposals. The proposer committee reflects
// state-to-state changes in the identity table to support immediately rejecting
// proposals from nodes after they are ejected. For validating proposals, we use
// *ByState methods.
//
// Since the proposer committee can change at any state:
// - we query by state ID
// - we must have incorporated the full state ancestry prior to validating
// messages
type DynamicCommittee interface {
Replicas
// IdentitiesByState returns a list of the legitimate HotStuff participants
// for the given state. The returned list of HotStuff participants:
// - contains nodes that are allowed to submit proposals, votes, and
// timeouts (un-ejected, non-zero weight at current state)
// - is ordered in the canonical order
// - contains no duplicates.
//
// ERROR conditions:
// - state.ErrUnknownSnapshotReference if the stateID is for an unknown state
IdentitiesByState(stateID models.Identity) ([]models.WeightedIdentity, error)
// IdentityByState returns the full Identity for the specified HotStuff
// participant. The node must be a legitimate HotStuff participant with
// NON-ZERO WEIGHT at the specified state.
// ERROR conditions:
// - model.InvalidSignerError if participantID does NOT correspond to an
// authorized HotStuff participant at the specified state.
// - state.ErrUnknownSnapshotReference if the stateID is for an unknown state
IdentityByState(
stateID models.Identity,
participantID models.Identity,
) (models.WeightedIdentity, error)
}
// StateSignerDecoder defines how to convert the ParentSignerIndices field
// within a particular state header to the identifiers of the nodes which signed
// the state.
type StateSignerDecoder[StateT models.Unique] interface {
// DecodeSignerIDs decodes the signer indices from the given state header into
// full node IDs.
// Note: A state header contains a quorum certificate for its parent, which
// proves that the consensus committee has reached agreement on validity of
// parent state. Consequently, the returned IdentifierList contains the
// consensus participants that signed the parent state.
// Expected Error returns during normal operations:
// - consensus.InvalidSignerIndicesError if signer indices included in the
// header do not encode a valid subset of the consensus committee
DecodeSignerIDs(
state *models.State[StateT],
) ([]models.WeightedIdentity, error)
}

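To make the committee contracts above concrete, here is a minimal sketch of leader selection and threshold computation for a committee that never changes. All types and conventions below (string identities, round-robin leaders, the 2/3 supermajority and 1/3 superminority weight thresholds) are assumptions for illustration, not the monorepo's actual models or rules.

```go
package example

import (
	"errors"
	"sort"
)

// Hypothetical stand-ins for models.Identity / models.WeightedIdentity;
// the real types live in consensus/models and may differ.
type Identity string

type WeightedIdentity struct {
	ID     Identity
	Weight uint64
}

var ErrRankUnknown = errors.New("rank unknown")

// StaticReplicas sketches leader selection and thresholds for a committee
// that never changes. Round-robin leaders and the 2/3 and 1/3 weight
// thresholds are assumed conventions, not taken from the monorepo.
type StaticReplicas struct {
	self    Identity
	members []WeightedIdentity // canonical order, all non-zero weight
}

func NewStaticReplicas(self Identity, members []WeightedIdentity) *StaticReplicas {
	sorted := append([]WeightedIdentity(nil), members...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].ID < sorted[j].ID })
	return &StaticReplicas{self: self, members: sorted}
}

func (r *StaticReplicas) totalWeight() uint64 {
	var total uint64
	for _, m := range r.members {
		total += m.Weight
	}
	return total
}

// LeaderForRank is fork-independent by construction: it depends only on the
// rank and the static committee, never on proposed states.
func (r *StaticReplicas) LeaderForRank(rank uint64) (Identity, error) {
	if len(r.members) == 0 {
		return "", ErrRankUnknown
	}
	return r.members[rank%uint64(len(r.members))].ID, nil
}

// QuorumThresholdForRank: smallest weight strictly greater than 2/3 of the
// total weight (supermajority).
func (r *StaticReplicas) QuorumThresholdForRank(rank uint64) (uint64, error) {
	return 2*r.totalWeight()/3 + 1, nil
}

// TimeoutThresholdForRank: smallest weight strictly greater than 1/3 of the
// total weight (superminority).
func (r *StaticReplicas) TimeoutThresholdForRank(rank uint64) (uint64, error) {
	return r.totalWeight()/3 + 1, nil
}

func (r *StaticReplicas) Self() Identity { return r.self }
```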
View File

@@ -0,0 +1,453 @@
package consensus
import (
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// ProposalViolationConsumer consumes outbound notifications about
// HotStuff-protocol violations. Such notifications are produced by the active
// consensus participants and consensus follower.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type ProposalViolationConsumer[
StateT models.Unique,
VoteT models.Unique,
] interface {
// OnInvalidStateDetected notifications are produced by components that have
// detected that a state proposal is invalid and need to report it. Most of
// the time such state can be detected by calling Validator.ValidateProposal.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking;
// and must handle repetition of the same events (with some processing
// overhead).
OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT])
// OnDoubleProposeDetected notifications are produced by the Finalization
// Logic whenever a double state proposal (equivocation) was detected.
// Equivocation occurs when the same leader proposes two different states for
// the same rank.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking;
// and must handle repetition of the same events (with some processing
// overhead).
OnDoubleProposeDetected(*models.State[StateT], *models.State[StateT])
}
// VoteAggregationViolationConsumer consumes outbound notifications about
// HotStuff-protocol violations, specifically invalid votes during processing.
// Such notifications are produced by the Vote Aggregation logic.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type VoteAggregationViolationConsumer[
StateT models.Unique,
VoteT models.Unique,
] interface {
// OnDoubleVotingDetected notifications are produced by the Vote Aggregation
// logic whenever a double voting (same voter voting for different states at
// the same rank) was detected.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnDoubleVotingDetected(*VoteT, *VoteT)
// OnInvalidVoteDetected notifications are produced by the Vote Aggregation
// logic whenever an invalid vote was detected.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnInvalidVoteDetected(err models.InvalidVoteError[VoteT])
// OnVoteForInvalidStateDetected notifications are produced by the Vote
// Aggregation logic whenever a vote for an invalid proposal was detected.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnVoteForInvalidStateDetected(
vote *VoteT,
invalidProposal *models.SignedProposal[StateT, VoteT],
)
}
// TimeoutAggregationViolationConsumer consumes outbound notifications about
// Active Pacemaker violations, specifically invalid timeouts during processing.
// Such notifications are produced by the Timeout Aggregation logic.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type TimeoutAggregationViolationConsumer[VoteT models.Unique] interface {
// OnDoubleTimeoutDetected notifications are produced by the Timeout
// Aggregation logic whenever a double timeout (same replica producing two
// different timeouts at the same rank) was detected.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnDoubleTimeoutDetected(
*models.TimeoutState[VoteT],
*models.TimeoutState[VoteT],
)
// OnInvalidTimeoutDetected notifications are produced by the Timeout
// Aggregation logic whenever an invalid timeout was detected.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT])
}
// FinalizationConsumer consumes outbound notifications produced by the logic
// tracking forks and finalization. Such notifications are produced by the
// active consensus participants, and generally potentially relevant to the
// larger node. The notifications are emitted in the order in which the
// finalization algorithm makes the respective steps.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type FinalizationConsumer[StateT models.Unique] interface {
// OnStateIncorporated notifications are produced by the Finalization Logic
// whenever a state is incorporated into the consensus state.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnStateIncorporated(*models.State[StateT])
// OnFinalizedState notifications are produced by the Finalization Logic
// whenever a state has been finalized. They are emitted in the order the
// states are finalized.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnFinalizedState(*models.State[StateT])
}
// ParticipantConsumer consumes outbound notifications produced by consensus
// participants actively proposing states, voting, collecting & aggregating
// votes to QCs, and participating in the pacemaker (sending timeouts,
// collecting & aggregating timeouts to TCs).
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type ParticipantConsumer[
StateT models.Unique,
VoteT models.Unique,
] interface {
// OnEventProcessed notifications are produced by the EventHandler when it is
// done processing and hands control back to the EventLoop to wait for the
// next event.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnEventProcessed()
// OnStart notifications are produced by the EventHandler when it starts
// state recovery and prepares for handling incoming events from the EventLoop.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnStart(currentRank uint64)
// OnReceiveProposal notifications are produced by the EventHandler when it
// starts processing a state.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnReceiveProposal(
currentRank uint64,
proposal *models.SignedProposal[StateT, VoteT],
)
// OnReceiveQuorumCertificate notifications are produced by the EventHandler
// when it starts processing a QuorumCertificate [QC] constructed by the
// node's internal vote aggregator.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate)
// OnReceiveTimeoutCertificate notifications are produced by the EventHandler
// when it starts processing a TimeoutCertificate [TC] constructed by the
// node's internal timeout aggregator.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate)
// OnPartialTimeoutCertificate notifications are produced by the EventHandler
// when it starts processing a partial TC constructed by the local timeout
// aggregator.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnPartialTimeoutCertificate(
currentRank uint64,
partialTimeoutCertificate *PartialTimeoutCertificateCreated,
)
// OnLocalTimeout notifications are produced by the EventHandler when it
// reacts to expiry of round duration timer. Such a notification indicates
// that the Pacemaker's timeout was processed by the system.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnLocalTimeout(currentRank uint64)
// OnRankChange notifications are produced by Pacemaker when it transitions to
// a new rank based on processing a QC or TC. The arguments specify the
// oldRank (first argument), and the newRank to which the Pacemaker
// transitioned (second argument).
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnRankChange(oldRank, newRank uint64)
// OnQuorumCertificateTriggeredRankChange notifications are produced by
// Pacemaker when it moves to a new rank based on processing a QC. The
// arguments specify the qc (first argument), which triggered the rank change,
// and the newRank to which the Pacemaker transitioned (second argument).
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking;
// and must handle repetition of the same events (with some processing
// overhead).
OnQuorumCertificateTriggeredRankChange(
oldRank uint64,
newRank uint64,
qc models.QuorumCertificate,
)
// OnTimeoutCertificateTriggeredRankChange notifications are produced by
// Pacemaker when it moves to a new rank based on processing a TC. The
// arguments specify the tc (first argument), which triggered the rank change,
// and the newRank to which the Pacemaker transitioned (second argument).
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnTimeoutCertificateTriggeredRankChange(
oldRank uint64,
newRank uint64,
tc models.TimeoutCertificate,
)
// OnStartingTimeout notifications are produced by Pacemaker. Such a
// notification indicates that the Pacemaker is now waiting for the system to
// (receive and) process states or votes. The specific timeout type is
// contained in the TimerInfo.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnStartingTimeout(startTime, endTime time.Time)
// OnCurrentRankDetails notifications are produced by the EventHandler during
// the course of a rank with auxiliary information. These notifications are
// generally not produced for all ranks (for example skipped ranks). These
// notifications are guaranteed to be produced for all ranks we enter after
// fully processing a message.
// Example 1:
// - We are in rank 8. We process a QC with rank 10, causing us to enter
// rank 11.
// - Then this notification will be produced for rank 11.
// Example 2:
// - We are in rank 8. We process a proposal with rank 10, which contains a
// TC for rank 9 and TC.NewestQC for rank 8.
// - The QC would allow us to enter rank 9 and the TC would allow us to
// enter rank 10, so after fully processing the message we are in rank 10.
// - Then this notification will be produced for rank 10, but not rank 9
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnCurrentRankDetails(
currentRank, finalizedRank uint64,
currentLeader models.Identity,
)
}
// VoteCollectorConsumer consumes outbound notifications produced by HotStuff's
// vote aggregation component. These events are primarily intended for the
// HotStuff-internal state machine (EventHandler), but might also be relevant to
// the larger node in which HotStuff is running.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type VoteCollectorConsumer[VoteT models.Unique] interface {
// OnQuorumCertificateConstructedFromVotes notifications are produced by the
// VoteAggregator component, whenever it constructs a QC from votes.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnQuorumCertificateConstructedFromVotes(models.QuorumCertificate)
// OnVoteProcessed notifications are produced by the Vote Aggregation logic,
// each time we successfully ingest a valid vote.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnVoteProcessed(vote *VoteT)
}
// TimeoutCollectorConsumer consumes outbound notifications produced by
// HotStuff's timeout aggregation component. These events are primarily intended
// for the HotStuff-internal state machine (EventHandler), but might also be
// relevant to the larger node in which HotStuff is running.
//
// Caution: the events are not strictly ordered by increasing ranks! The
// notifications are emitted by concurrent processing logic. Over larger time
// scales, the emitted events are for statistically increasing ranks. However,
// on short time scales there are _no_ monotonicity guarantees w.r.t. the
// events' ranks.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type TimeoutCollectorConsumer[VoteT models.Unique] interface {
// OnTimeoutCertificateConstructedFromTimeouts notifications are produced by
// the TimeoutProcessor component, whenever it constructs a TC based on
// TimeoutStates from a supermajority of consensus participants.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnTimeoutCertificateConstructedFromTimeouts(
certificate models.TimeoutCertificate,
)
// OnPartialTimeoutCertificateCreated notifications are produced by the
// TimeoutProcessor component, whenever it has collected TimeoutStates from a
// superminority of consensus participants for a specific rank. Along with the
// rank, it reports the newest QC and TC (for previous rank) discovered in
// process of timeout collection. Per convention, the newest QC is never nil,
// while the TC for the previous rank might be nil.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnPartialTimeoutCertificateCreated(
rank uint64,
newestQC models.QuorumCertificate,
lastRankTC models.TimeoutCertificate,
)
// OnNewQuorumCertificateDiscovered notifications are produced by the
// TimeoutCollector component, whenever it discovers a new QC included in a
// timeout state.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate)
// OnNewTimeoutCertificateDiscovered notifications are produced by the
// TimeoutCollector component, whenever it discovers a new TC included in a
// timeout state.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate)
// OnTimeoutProcessed notifications are produced by the Timeout Aggregation
// logic, each time we successfully ingest a valid timeout.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnTimeoutProcessed(timeout *models.TimeoutState[VoteT])
}
// CommunicatorConsumer consumes outbound notifications produced by HotStuff and
// its components. Notifications allow the HotStuff core algorithm to
// communicate with the other actors of the consensus process.
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type CommunicatorConsumer[StateT models.Unique, VoteT models.Unique] interface {
// OnOwnVote notifies about intent to send a vote for the given parameters to
// the specified recipient.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnOwnVote(vote *VoteT, recipientID models.Identity)
// OnOwnTimeout notifies about intent to broadcast the given timeout
// state to all actors of the consensus process.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking; and must handle
// repetition of the same events (with some processing overhead).
OnOwnTimeout(timeout *models.TimeoutState[VoteT])
// OnOwnProposal notifies about intent to broadcast the given state proposal
// to all actors of the consensus process. targetPublicationTime specifies
// when to broadcast the proposal, which is useful to control the state
// production rate.
// Prerequisites:
// Implementation must be concurrency safe; Non-blocking;
// and must handle repetition of the same events (with some processing
// overhead).
OnOwnProposal(
proposal *models.SignedProposal[StateT, VoteT],
targetPublicationTime time.Time,
)
}
// FollowerConsumer consumes outbound notifications produced by consensus
// followers. It is a subset of the notifications produced by consensus
// participants.
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type FollowerConsumer[StateT models.Unique, VoteT models.Unique] interface {
ProposalViolationConsumer[StateT, VoteT]
FinalizationConsumer[StateT]
}
// Consumer consumes outbound notifications produced by consensus participants.
// Notifications are consensus-internal state changes which are potentially
// relevant to the larger node in which HotStuff is running. The notifications
// are emitted in the order in which the HotStuff algorithm makes the respective
// steps.
//
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type Consumer[StateT models.Unique, VoteT models.Unique] interface {
FollowerConsumer[StateT, VoteT]
CommunicatorConsumer[StateT, VoteT]
ParticipantConsumer[StateT, VoteT]
}
// VoteAggregationConsumer consumes outbound notifications produced by Vote
// Aggregation logic. It is a subset of the notifications produced by consensus
// participants.
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type VoteAggregationConsumer[
StateT models.Unique,
VoteT models.Unique,
] interface {
VoteAggregationViolationConsumer[StateT, VoteT]
VoteCollectorConsumer[VoteT]
}
// TimeoutAggregationConsumer consumes outbound notifications produced by the
// Timeout Aggregation logic. It is a subset of the notifications produced by
// consensus
// participants.
// Implementations must:
// - be concurrency safe
// - be non-blocking
// - handle repetition of the same events (with some processing overhead).
type TimeoutAggregationConsumer[VoteT models.Unique] interface {
TimeoutAggregationViolationConsumer[VoteT]
TimeoutCollectorConsumer[VoteT]
}

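The consumer interfaces above repeatedly require implementations to be concurrency safe and non-blocking. A common way to satisfy that is to enqueue notifications and process them on a dedicated goroutine, as in this sketch; all names here are hypothetical.

```go
package example

// Sketch of the non-blocking pattern the consumer contracts above ask
// for: notification methods return immediately, and the actual work
// happens on a dedicated goroutine.
type asyncConsumer struct {
	events chan string // stand-in payload; real consumers carry models types
}

// newAsyncConsumer starts the worker goroutine that drains the queue.
func newAsyncConsumer(handle func(string)) *asyncConsumer {
	c := &asyncConsumer{events: make(chan string, 1024)}
	go func() {
		for ev := range c.events {
			handle(ev)
		}
	}()
	return c
}

// OnFinalizedState-style hook: enqueue and return without blocking the
// consensus-critical caller. Dropping on overflow is a sketch-level
// choice; a production consumer would need a deliberate policy here.
func (c *asyncConsumer) OnFinalizedState(stateID string) {
	select {
	case c.events <- stateID:
	default:
	}
}
```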
View File

@@ -0,0 +1,84 @@
package consensus
import (
"context"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)
// PartialTimeoutCertificateCreated represents a notification emitted by the
// TimeoutProcessor component, whenever it has collected TimeoutStates from a
// superminority of consensus participants for a specific rank. Along with the
// rank, it reports the newest QuorumCertificate and TimeoutCertificate (for
// previous rank) discovered during timeout collection. Per convention, the
// newest QuorumCertificate is never nil, while the TimeoutCertificate for the
// previous rank might be nil.
type PartialTimeoutCertificateCreated struct {
Rank uint64
NewestQuorumCertificate models.QuorumCertificate
PriorRankTimeoutCertificate models.TimeoutCertificate
}
// EventHandler runs a state machine to process proposals, QuorumCertificates,
// and local timeouts. It is not concurrency safe.
type EventHandler[StateT models.Unique, VoteT models.Unique] interface {
// OnReceiveQuorumCertificate processes a valid quorumCertificate constructed
// by internal vote aggregator or discovered in TimeoutState. All inputs
// should be validated before feeding into this function. Assuming trusted
// data. No errors are expected during normal operation.
OnReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) error
// OnReceiveTimeoutCertificate processes a valid timeoutCertificate
// constructed by internal timeout aggregator, discovered in TimeoutState or
// broadcast over the network. All inputs should be validated before feeding
// into this function. Assuming trusted data. No errors are expected during
// normal operation.
OnReceiveTimeoutCertificate(
timeoutCertificate models.TimeoutCertificate,
) error
// OnReceiveProposal processes a state proposal received from another HotStuff
// consensus participant. All inputs should be validated before feeding into
// this function. Assuming trusted data. No errors are expected during normal
// operation.
OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error
// OnLocalTimeout handles a local timeout event by creating a
// models.TimeoutState and broadcasting it. No errors are expected during
// normal operation.
OnLocalTimeout() error
// OnPartialTimeoutCertificateCreated handles notifications produced by the
// internal timeout aggregator. If the notification is for the current rank,
// a corresponding models.TimeoutState is broadcast to the consensus
// committee. No errors are expected during normal operation.
OnPartialTimeoutCertificateCreated(
partialTimeoutCertificate *PartialTimeoutCertificateCreated,
) error
// TimeoutChannel returns a channel that sends a signal on timeout.
TimeoutChannel() <-chan time.Time
// Start starts the event handler. No errors are expected during normal
// operation.
// CAUTION: EventHandler is not concurrency safe. The Start method must be
// executed by the same goroutine that also calls the other business logic
// methods, or concurrency safety has to be implemented externally.
Start(ctx context.Context) error
}
// EventLoop buffers and processes incoming proposals and QCs.
type EventLoop[StateT models.Unique, VoteT models.Unique] interface {
lifecycle.Component
TimeoutCollectorConsumer[VoteT]
VoteCollectorConsumer[VoteT]
SubmitProposal(proposal *models.SignedProposal[StateT, VoteT])
}
// FollowerLoop only follows certified states; it does not actively process
// the collection of proposals and QCs/TCs.
type FollowerLoop[StateT models.Unique, VoteT models.Unique] interface {
AddCertifiedState(certifiedState *models.CertifiedState[StateT])
}

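Since the EventHandler is explicitly not concurrency safe, an EventLoop typically owns it from a single goroutine and multiplexes submitted events with the timeout channel. A sketch of that shape, with a hypothetical stand-in handler interface:

```go
package example

import (
	"context"
	"time"
)

// Hypothetical stand-in for the subset of EventHandler used here.
type handler interface {
	OnReceiveProposal(proposal any) error
	OnLocalTimeout() error
	TimeoutChannel() <-chan time.Time
}

type eventLoop struct {
	h         handler
	proposals chan any // SubmitProposal feeds this; buffered in practice
}

// SubmitProposal may be called from any goroutine.
func (l *eventLoop) SubmitProposal(proposal any) {
	l.proposals <- proposal
}

// run is the single goroutine that owns the handler, so the handler's
// methods are never invoked concurrently.
func (l *eventLoop) run(ctx context.Context) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case p := <-l.proposals:
			if err := l.h.OnReceiveProposal(p); err != nil {
				return err
			}
		case <-l.h.TimeoutChannel():
			if err := l.h.OnLocalTimeout(); err != nil {
				return err
			}
		}
	}
}
```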
View File

@@ -0,0 +1,23 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// Finalizer is used by the consensus algorithm to inform other components
// (such as the protocol state) about finalization of states.
//
// Since we have two different protocol states (one for the main consensus,
// the other for the collection cluster consensus), the Finalizer interface
// allows the two protocol states to provide different implementations for
// updating their state when a state has been finalized.
//
// Updating the protocol state should always succeed when the data is
// consistent. However, in case the protocol state is corrupted, an error
// should be returned and the consensus algorithm should halt. The error
// returned from MakeFinal is how the protocol state reports exceptions.
type Finalizer interface {
// MakeFinal will declare a state and all of its ancestors as finalized, which
// makes it an immutable part of the time reel. Returning an error indicates
// some fatal condition and will cause the finalization logic to terminate.
MakeFinal(stateID models.Identity) error
}

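A sketch of the MakeFinal contract (finalize a state and all of its ancestors) over a hypothetical parent-pointer store; the real protocol state is more involved:

```go
package example

import "errors"

// finalizerStore assumes both maps are initialized and the finalized root
// is already marked, so every consistent ancestry chain terminates.
type finalizerStore struct {
	parent    map[string]string // stateID -> parent stateID
	finalized map[string]bool
}

func (s *finalizerStore) MakeFinal(stateID string) error {
	for id := stateID; !s.finalized[id]; {
		s.finalized[id] = true
		p, ok := s.parent[id]
		if !ok {
			// Broken ancestry before reaching a finalized state: per the
			// contract above, report the exception so consensus can halt.
			return errors.New("corrupted protocol state: missing ancestor")
		}
		id = p
	}
	return nil
}
```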
View File

@@ -0,0 +1,106 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// FinalityProof represents a finality proof for a State. By convention, a
// FinalityProof is immutable. Finality in Jolteon/HotStuff is determined by the
// 2-chain rule:
//
// There exists a _certified_ state C, such that State.Rank + 1 = C.Rank
type FinalityProof[StateT models.Unique] struct {
State *models.State[StateT]
CertifiedChild *models.CertifiedState[StateT]
}
// Forks maintains an in-memory data-structure of all states whose rank-number
// is larger or equal to the latest finalized state. The latest finalized state
// is defined as the finalized state with the largest rank number. When adding
// states, Forks automatically updates its internal state (including finalized
// states). Furthermore, states whose rank number is smaller than the latest
// finalized state are pruned automatically.
//
// PREREQUISITES:
// Forks expects that only states are added that can be connected to its latest
// finalized state (without missing interim ancestors). If this condition is
// violated, Forks will raise an error and ignore the state.
type Forks[StateT models.Unique] interface {
// GetStatesForRank returns all known states for the given rank
GetStatesForRank(rank uint64) []*models.State[StateT]
// GetState returns (*models.State[StateT], true) if the state with the
// specified id was found and (nil, false) otherwise.
GetState(stateID models.Identity) (*models.State[StateT], bool)
// FinalizedRank returns the largest rank number where a finalized state is
// known
FinalizedRank() uint64
// FinalizedState returns the finalized state with the largest rank number
FinalizedState() *models.State[StateT]
// FinalityProof returns the latest finalized state and a certified child from
// the subsequent rank, which proves finality.
// CAUTION: method returns (nil, false), when Forks has not yet finalized any
// states beyond the finalized root state it was initialized with.
FinalityProof() (*FinalityProof[StateT], bool)
// AddValidatedState appends the validated state to the tree of pending
// states and updates the latest finalized state (if applicable). Unless the
// parent is below the pruning threshold (latest finalized rank), we require
// that the parent is already stored in Forks. Calling this method with
// previously processed states leaves the consensus state invariant (though,
// it will potentially cause some duplicate processing).
// Notes:
// - Method `AddCertifiedState(..)` should be used preferably, if a QC
// certifying `state` is already known. This is generally the case for the
// consensus follower.
// - Method `AddValidatedState` is intended for active consensus
// participants, which fully validate states (incl. payload), i.e. QCs are
// processed as part of validated proposals.
//
// Possible error returns:
// - model.MissingStateError if the parent does not exist in the forest (but
// is above the pruned rank). From the perspective of Forks, this error is
// benign (no-op).
// - model.InvalidStateError if the state is invalid (see
// `Forks.EnsureStateIsValidExtension` for details). From the perspective
// of Forks, this error is benign (no-op). However, we assume all states
// are fully verified, i.e. they should satisfy all consistency
// requirements. Hence, this error is likely an indicator of a bug in the
// compliance layer.
// - model.ByzantineThresholdExceededError if conflicting QCs or conflicting
// finalized states have been detected (violating foundational consensus
// guarantees). This indicates that there are 1/3+ Byzantine nodes
// (weighted by seniority) in the network, breaking the safety guarantees
// of HotStuff (or there is a critical bug / data corruption). Forks
// cannot recover from this exception.
// - All other errors are potential symptoms of bugs or state corruption.
AddValidatedState(proposal *models.State[StateT]) error
// AddCertifiedState appends the given certified state to the tree of pending
// states and updates the latest finalized state (if finalization progressed).
// Unless the parent is below the pruning threshold (latest finalized rank),
// we require that the parent is already stored in Forks. Calling this method
// with previously processed states leaves the consensus state invariant
// (though, it will potentially cause some duplicate processing).
//
// Possible error returns:
// - model.MissingStateError if the parent does not exist in the forest (but
// is above the pruned rank). From the perspective of Forks, this error is
// benign (no-op).
// - model.InvalidStateError if the state is invalid (see
// `Forks.EnsureStateIsValidExtension` for details). From the perspective
// of Forks, this error is benign (no-op). However, we assume all states
// are fully verified, i.e. they should satisfy all consistency
// requirements. Hence, this error is likely an indicator of a bug in the
// compliance layer.
// - model.ByzantineThresholdExceededError if conflicting QCs or conflicting
// finalized states have been detected (violating foundational consensus
// guarantees). This indicates that there are 1/3+ Byzantine nodes
// (weighted by seniority) in the network, breaking the safety guarantees
// of HotStuff (or there is a critical bug / data corruption). Forks
// cannot recover from this exception.
// - All other errors are potential symptoms of bugs or state corruption.
AddCertifiedState(certifiedState *models.CertifiedState[StateT]) error
}

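The 2-chain rule above reduces to a simple check once a certified child is known. A sketch with hypothetical stand-in types:

```go
package example

// chainState is a hypothetical stand-in for the models package types.
type chainState struct {
	id     string
	parent string
	rank   uint64
}

// isFinalizedBy reports whether certifiedChild proves finality of s per the
// 2-chain rule: a _certified_ direct child at exactly the next rank. It
// assumes the caller has already verified that certifiedChild is certified
// (i.e. carries a valid QC).
func isFinalizedBy(s, certifiedChild chainState) bool {
	return certifiedChild.parent == s.id && certifiedChild.rank == s.rank+1
}
```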
View File

@@ -0,0 +1,30 @@
package consensus
import (
"context"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// LeaderProvider handles leader selection. State is provided, if relevant to
// the upstream consensus engine.
type LeaderProvider[
StateT models.Unique,
PeerIDT models.Unique,
CollectedT models.Unique,
] interface {
// GetNextLeaders returns a list of node indices, in priority order. Note that
// if no error is returned, GetNextLeaders is assumed to produce a non-empty
// list. If the returned list is smaller than minimumProvers, the liveness
// check will loop until the list grows beyond that size.
GetNextLeaders(ctx context.Context, prior *StateT) ([]PeerIDT, error)
// ProveNextState prepares a non-finalized new state from the prior, to be
// proposed and voted upon. The provided context may be canceled and should
// be used to halt long-running prover operations.
ProveNextState(
ctx context.Context,
rank uint64,
filter []byte,
priorState models.Identity,
) (*StateT, error)
}

View File

@@ -0,0 +1,25 @@
package consensus
import (
"context"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// LivenessProvider handles liveness announcements ahead of proving, to
// pre-emptively choose the next prover. In expected leader scenarios, this
// enables a peer to determine if an honest next prover is offline, so that it
// can publish the next state without waiting.
type LivenessProvider[
StateT models.Unique,
PeerIDT models.Unique,
CollectedT models.Unique,
] interface {
// Collect returns the collected mutation operations ahead of liveness
// announcements.
Collect(ctx context.Context) (CollectedT, error)
// SendLiveness announces liveness ahead of the next prover determination and
// subsequent proving. It provides the prior state and collected mutation
// operations if relevant.
SendLiveness(ctx context.Context, prior *StateT, collected CollectedT) error
}

View File

@@ -0,0 +1,65 @@
package consensus
import (
"context"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Pacemaker defines a standard set of methods for handling pacemaker behaviors
// in the consensus engine.
type Pacemaker interface {
ProposalDurationProvider
// CurrentRank returns the current rank
CurrentRank() uint64
// LatestQuorumCertificate returns the latest quorum certificate seen.
LatestQuorumCertificate() models.QuorumCertificate
// PriorRankTimeoutCertificate returns the prior rank's timeout certificate,
// if it exists.
PriorRankTimeoutCertificate() models.TimeoutCertificate
// ReceiveQuorumCertificate handles an incoming quorum certificate, advancing
// to a new rank if applicable.
ReceiveQuorumCertificate(
quorumCertificate models.QuorumCertificate,
) (*models.NextRank, error)
// ReceiveTimeoutCertificate handles an incoming timeout certificate,
// advancing to a new rank if applicable.
ReceiveTimeoutCertificate(
timeoutCertificate models.TimeoutCertificate,
) (*models.NextRank, error)
// TimeoutCh provides a channel for timing out on the current rank.
TimeoutCh() <-chan time.Time
// Start starts the pacemaker, takes a cancellable context.
Start(ctx context.Context)
}
// ProposalDurationProvider generates the target publication time for state
// proposals.
type ProposalDurationProvider interface {
// TargetPublicationTime is intended to be called by the EventHandler,
// whenever it wants to publish a new proposal. The event handler inputs
// - proposalRank: the rank it is proposing for,
// - timeRankEntered: the time when the EventHandler entered this rank
// - parentStateId: the ID of the parent state, which the EventHandler is
// building on
// TargetPublicationTime returns the time stamp when the new proposal should
// be broadcast. For a given rank where we are the primary, suppose the
// actual time we are done building our proposal is P:
// - if P < TargetPublicationTime(..), then the EventHandler should wait
// until `TargetPublicationTime` to broadcast the proposal
// - if P >= TargetPublicationTime(..), then the EventHandler should
// immediately broadcast the proposal
//
// Note: Technically, our metrics capture the publication delay relative to
// this function's _latest_ call. Currently, the EventHandler is the only
// caller of this function, and only calls it once.
//
// Concurrency safe.
TargetPublicationTime(
proposalRank uint64,
timeRankEntered time.Time,
parentStateId models.Identity,
) time.Time
}

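A sketch of how a proposer might honor TargetPublicationTime, following the contract above (wait if the proposal is ready early, publish immediately otherwise); broadcast is a hypothetical hook:

```go
package example

import (
	"context"
	"time"
)

// publishAt waits until target if the proposal is ready early, then
// broadcasts; if the target has already passed, it broadcasts immediately.
func publishAt(ctx context.Context, target time.Time, broadcast func()) error {
	if wait := time.Until(target); wait > 0 {
		timer := time.NewTimer(wait)
		defer timer.Stop()
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
		}
	}
	broadcast()
	return nil
}
```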
View File

@@ -0,0 +1,25 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// StateProducer is responsible for producing new state proposals. It is a
// service component to HotStuff's main state machine (implemented in the
// EventHandler). The StateProducer's central purpose is to mediate concurrent
// signing requests to its embedded `hotstuff.SafetyRules` during state
// production. The actual work of producing a state proposal is delegated to the
// embedded `consensus.LeaderProvider`.
type StateProducer[StateT models.Unique, VoteT models.Unique] interface {
// MakeStateProposal builds a new HotStuff state proposal using the given
// rank, the given quorum certificate for its parent and [optionally] a
// timeout certificate for the last rank (may be nil).
// Error Returns:
// - model.NoVoteError if it is not safe for us to vote (our proposal
// includes our vote) for this rank. This can happen if we have already
// proposed or timed out for this rank.
// - generic error in case of unexpected failure
MakeStateProposal(
rank uint64,
qc models.QuorumCertificate,
lastRankTC models.TimeoutCertificate,
) (*models.SignedProposal[StateT, VoteT], error)
}

View File

@@ -0,0 +1,73 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// SafetyRules enforces all consensus rules that guarantee safety. It produces
// votes for the given states or TimeoutState for the given ranks, only if all
// safety rules are satisfied. In particular, SafetyRules guarantees a
// foundational security theorem for HotStuff, which we utilize also outside of
// consensus (e.g. queuing pending states for execution, verification, sealing
// etc.):
//
// THEOREM: For each rank, there can be at most 1 certified state.
//
// Implementations are generally *not* concurrency safe.
type SafetyRules[StateT models.Unique, VoteT models.Unique] interface {
// ProduceVote takes a state proposal and current rank, and decides whether to
// vote for the state. Voting is deterministic, i.e. voting for same proposal
// will always result in the same vote.
// Returns:
// * (vote, nil): On the _first_ state for the current rank that is safe to
// vote for. Subsequently, the voter does _not_ vote for any _other_ state
// with the same (or lower) rank. SafetyRules internally caches and persists
// its latest vote. As long as the SafetyRules' internal state remains
// unchanged, ProduceVote will return its cached vote for identical inputs.
// * (nil, model.NoVoteError): If the safety module decides that it is not
// safe to vote for the given state. This is a sentinel error and
// _expected_ during normal operation.
// All other errors are unexpected and potential symptoms of uncovered edge
// cases or corrupted internal state (fatal).
ProduceVote(
proposal *models.SignedProposal[StateT, VoteT],
curRank uint64,
) (*VoteT, error)
// ProduceTimeout takes the current rank, the highest locally known QC and a
// TC (optional; must be nil if and only if the QC is for the previous rank)
// and decides whether to produce a timeout for the current rank.
// Returns:
// * (timeout, nil): It is safe to timeout for current rank using newestQC
// and lastRankTC.
// * (nil, model.NoTimeoutError): If replica is not part of the authorized
// consensus committee (anymore) and therefore is not authorized to produce
// a valid timeout state. This sentinel error is _expected_ during normal
// operation, e.g. during the grace-period after Rank switchover or after
// the replica self-ejected.
// All other errors are unexpected and potential symptoms of uncovered edge
// cases or corrupted internal state (fatal).
ProduceTimeout(
curRank uint64,
newestQC models.QuorumCertificate,
lastRankTC models.TimeoutCertificate,
) (*models.TimeoutState[VoteT], error)
// SignOwnProposal takes an unsigned state proposal and produces a vote for
// it. Vote is a cryptographic commitment to the proposal. By adding the vote
// to an unsigned proposal, the caller constructs a signed state proposal.
// This method has to be used only by the leader, which must be the proposer
// of the state (or an exception is returned).
// Implementors must guarantee that:
// - vote on the proposal satisfies safety rules
// - maximum one proposal is signed per rank
// Returns:
// * (vote, nil): the passed unsigned proposal is a valid one, and it's safe
// to make a proposal. Subsequently, leader does _not_ produce any _other_
// proposal with the same (or lower) rank.
// * (nil, model.NoVoteError): according to HotStuff's Safety Rules, it is
// not safe to sign the given proposal. This could happen because we have
// already proposed or timed out for the given rank. This is a sentinel
// error and _expected_ during normal operation.
// All other errors are unexpected and potential symptoms of uncovered edge
// cases or corrupted internal state (fatal).
SignOwnProposal(unsignedProposal *models.Proposal[StateT]) (*VoteT, error)
}

View File

@ -0,0 +1,161 @@
package consensus
import (
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// WeightedSignatureAggregator aggregates signatures of the same signature
// scheme and the same message from different signers. The public keys and
// message are agreed upon upfront. It is also recommended to only aggregate
// signatures generated with keys representing equivalent security-bit level.
// Furthermore, a weight [unsigned int64] is assigned to each signer ID. The
// WeightedSignatureAggregator internally tracks the total weight of all
// collected signatures. Implementations must be concurrency safe.
type WeightedSignatureAggregator interface {
// Verify verifies the signature under the stored public keys and message.
// Expected errors during normal operations:
// - model.InvalidSignerError if signerID is invalid (not a consensus
// participant)
// - model.ErrInvalidSignature if signerID is valid but signature is
// cryptographically invalid
Verify(signerID models.Identity, sig []byte) error
// TrustedAdd adds a signature to the internal set of signatures and adds the
// signer's weight to the total collected weight, iff the signature is _not_ a
// duplicate. The total weight of all collected signatures (excluding
// duplicates) is returned regardless of any returned error.
// Expected errors during normal operations:
// - model.InvalidSignerError if signerID is invalid (not a consensus
// participant)
// - model.DuplicatedSignerError if the signer has been already added
TrustedAdd(signerID models.Identity, sig []byte) (
totalWeight uint64,
exception error,
)
// TotalWeight returns the total weight presented by the collected signatures.
TotalWeight() uint64
// Aggregate aggregates the signatures and returns the aggregated consensus.
// The function performs a final verification and errors if the aggregated
// signature is invalid. This is required for the function safety since
// `TrustedAdd` allows adding invalid signatures.
// The function errors with:
// - model.InsufficientSignaturesError if no signatures have been added yet
// - model.InvalidSignatureIncludedError if:
// -- some signature(s), included via TrustedAdd, fail to deserialize
// (regardless of the aggregated public key)
// -- or all signatures deserialize correctly but some signature(s),
// included via TrustedAdd, are invalid (while aggregated public key is
// valid)
// - model.InvalidAggregatedKeyError if all signatures deserialize correctly
// but the signer's proving public keys sum up to an invalid key (BLS
// identity public key). Any aggregated signature would fail the
// cryptographic verification under the identity public key and therefore
// such signature is considered invalid. Such scenario can only happen if
// proving public keys of signers were forged to add up to the identity
// public key. Under the assumption that all proving key PoPs are valid,
// this error case can only happen if all signers are malicious and
// colluding. If there is at least one honest signer, there is a
// negligible probability that the aggregated key is identity.
//
// The function is thread-safe.
Aggregate() ([]models.WeightedIdentity, models.AggregatedSignature, error)
}
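// Example (editor's sketch): one possible collection step built on this
// interface. The supermajority threshold and the surrounding error policy are
// assumptions, not part of the interface contract.
func collectVote(
	agg WeightedSignatureAggregator,
	signerID models.Identity,
	sig []byte,
	supermajorityWeight uint64, // assumed threshold, computed elsewhere
) (thresholdReached bool, err error) {
	// Verify before adding: TrustedAdd does not itself validate signatures.
	if err := agg.Verify(signerID, sig); err != nil {
		return false, err // invalid signer or cryptographically invalid sig
	}
	totalWeight, err := agg.TrustedAdd(signerID, sig)
	if err != nil {
		return false, err // e.g. duplicated signer
	}
	// Once the threshold is reached, the caller would invoke Aggregate() to
	// obtain the signer identities and the aggregated signature.
	return totalWeight >= supermajorityWeight, nil
}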
// TimeoutSignatureAggregator aggregates timeout signatures for one particular
// rank. When instantiating a TimeoutSignatureAggregator, the following
// information is supplied:
// - The rank for which the aggregator collects timeouts.
// - For each replica that is authorized to send a timeout at this particular
// rank: the node ID, public proving keys, and weight
//
// Timeouts for other ranks or from non-authorized replicas are rejected.
// In their TimeoutStates, replicas include a signature over the pair (rank,
// newestQCRank), where `rank` is the rank number the timeout is for and
// `newestQCRank` is the rank of the newest QC known to the replica.
// TimeoutSignatureAggregator collects these signatures and internally tracks
// the total weight of all collected signatures. Note that in general the
// signed messages are different, which makes the aggregation a comparatively
// expensive operation. Upon calling `Aggregate`, the
// TimeoutSignatureAggregator aggregates all valid signatures collected up to
// this point. The aggregate signature is guaranteed to be correct, as only
// valid signatures are accepted as inputs.
// Implementations must be concurrency safe.
type TimeoutSignatureAggregator interface {
// VerifyAndAdd verifies the signature under the stored public keys and adds
// the signature and the corresponding highest QC to the internal set.
// The internal set and collected weight are modified iff the signature _is_
// valid.
// The total weight of all collected signatures (excluding duplicates) is
// returned regardless of any returned error.
// Expected errors during normal operations:
// - model.InvalidSignerError if signerID is invalid (not a consensus
// participant)
// - model.DuplicatedSignerError if the signer has been already added
// - model.ErrInvalidSignature if signerID is valid but signature is
// cryptographically invalid
VerifyAndAdd(
signerID models.Identity,
sig []byte,
newestQCRank uint64,
) (totalWeight uint64, exception error)
// TotalWeight returns the total weight presented by the collected signatures.
TotalWeight() uint64
// Rank returns the rank that this instance is aggregating signatures for.
Rank() uint64
// Aggregate aggregates the signatures and returns them with additional data.
// The aggregated signature will be returned as the SigData of the timeout
// certificate. The caller can be sure that the resulting signature is valid.
// Expected errors during normal operations:
// - model.InsufficientSignaturesError if no signatures have been added yet
Aggregate() (
signersInfo []TimeoutSignerInfo,
aggregatedSig models.AggregatedSignature,
exception error,
)
}
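// Example (editor's sketch): feeding one timeout signature into the
// aggregator and building the TC data once an assumed threshold is crossed.
// The `onTC` sink and the threshold value are hypothetical.
func onTimeoutSignature(
	agg TimeoutSignatureAggregator,
	signerID models.Identity,
	sig []byte,
	newestQCRank uint64,
	tcThreshold uint64, // assumed supermajority weight
	onTC func([]TimeoutSignerInfo, models.AggregatedSignature),
) error {
	totalWeight, err := agg.VerifyAndAdd(signerID, sig, newestQCRank)
	if err != nil {
		return err // invalid, duplicated, or unauthorized signer
	}
	if totalWeight < tcThreshold {
		return nil // keep collecting
	}
	signersInfo, aggSig, err := agg.Aggregate()
	if err != nil {
		return err
	}
	onTC(signersInfo, aggSig)
	return nil
}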
// TimeoutSignerInfo is a helper structure that stores the QC ranks that each
// signer contributed to a TC. Used as the result of
// TimeoutSignatureAggregator.Aggregate().
type TimeoutSignerInfo struct {
NewestQCRank uint64
Signer models.Identity
}
// StateSignatureData is an intermediate struct for Packer to pack the
// aggregated signature data into raw bytes or unpack from raw bytes.
type StateSignatureData struct {
Signers []models.WeightedIdentity
Signature []byte
}
// Packer packs aggregated signature data into raw bytes to be used in a
// state header.
type Packer interface {
// Pack serializes the provided StateSignatureData into a precursor format of
// a QC. rank is the rank of the state that the aggregated signature is for.
// sig is the aggregated signature data.
// Expected error returns during normal operations:
// * none; all errors are symptoms of inconsistent input data or corrupted
// internal state.
Pack(rank uint64, sig *StateSignatureData) (
signerIndices []byte,
sigData []byte,
err error,
)
// Unpack de-serializes the provided signature data.
// sigData is the aggregated signature data.
// It returns:
// - (sigData, nil) if successfully unpacked the signature data
// - (nil, model.InvalidFormatError) if failed to unpack the signature data
Unpack(signerIdentities []models.WeightedIdentity, sigData []byte) (
*StateSignatureData,
error,
)
}
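// Example (editor's sketch): a pack/unpack round trip. In real usage the
// signer identities passed to Unpack would be recovered from the signer
// indices via the committee; here they are reused directly for brevity.
func packUnpackRoundTrip(
	packer Packer,
	rank uint64,
	signers []models.WeightedIdentity,
	aggSig []byte,
) (*StateSignatureData, error) {
	_, sigData, err := packer.Pack(rank, &StateSignatureData{
		Signers:   signers,
		Signature: aggSig,
	})
	if err != nil {
		return nil, err // inconsistent input or corrupted internal state
	}
	return packer.Unpack(signers, sigData)
}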

View File

@ -0,0 +1,39 @@
package consensus
import (
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Signer is responsible for creating votes for a given state and timeouts
// for a given rank.
type Signer[StateT models.Unique, VoteT models.Unique] interface {
// CreateVote creates a vote for the given state. No error returns are
// expected during normal operations (incl. presence of byz. actors).
CreateVote(state *models.State[StateT]) (*VoteT, error)
// CreateTimeout creates a timeout for the given rank. No error returns are
// expected during normal operations (incl. presence of byz. actors).
CreateTimeout(
curRank uint64,
newestQC models.QuorumCertificate,
previousRankTimeoutCert models.TimeoutCertificate,
) (*models.TimeoutState[VoteT], error)
}
type SignatureAggregator interface {
VerifySignatureMultiMessage(
publicKeys [][]byte,
signature []byte,
messages [][]byte,
context []byte,
) bool
VerifySignatureRaw(
publicKey []byte,
signature []byte,
message []byte,
context []byte,
) bool
Aggregate(
publicKeys [][]byte,
signatures [][]byte,
) (models.AggregatedSignature, error)
}

View File

@ -0,0 +1,18 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// ConsensusStore defines the methods required for internal state that should
// persist between restarts of the consensus engine.
type ConsensusStore[VoteT models.Unique] interface {
ReadOnlyConsensusStore[VoteT]
PutConsensusState(state *models.ConsensusState[VoteT]) error
PutLivenessState(state *models.LivenessState) error
}
// ReadOnlyConsensusStore defines the methods required for reading internal
// state persisted between restarts of the consensus engine.
type ReadOnlyConsensusStore[VoteT models.Unique] interface {
GetConsensusState(filter []byte) (*models.ConsensusState[VoteT], error)
GetLivenessState(filter []byte) (*models.LivenessState, error)
}
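// Example (editor's sketch): the persist-before-act pattern this store
// enables. Durably recording the updated consensus state before releasing a
// vote means a crash-and-restart cannot lead to double voting. The `emit`
// callback is hypothetical.
func voteDurably[VoteT models.Unique](
	store ConsensusStore[VoteT],
	consensusState *models.ConsensusState[VoteT],
	vote *VoteT,
	emit func(*VoteT), // hypothetical broadcast path
) error {
	if err := store.PutConsensusState(consensusState); err != nil {
		return err // do not vote if persistence failed
	}
	emit(vote)
	return nil
}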

View File

@ -0,0 +1,20 @@
package consensus
import (
"context"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// SyncProvider handles synchronization management
type SyncProvider[StateT models.Unique] interface {
// Synchronize performs synchronization to set internal state. Note that it
// is assumed that errors are transient and synchronization should be
// reattempted on failure. If some other process for synchronization is used
// and this should be bypassed, send nil on the error channel. The provided
// context may be canceled and should be used to halt long-running sync
// operations.
Synchronize(
ctx context.Context,
existing *StateT,
) (<-chan *StateT, <-chan error)
}
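// Example (editor's sketch): consuming Synchronize with the retry-on-error
// semantics described above. Errors are treated as transient; a nil error
// signals that synchronization is handled elsewhere and can be bypassed.
func syncUntilDone[StateT models.Unique](
	ctx context.Context,
	provider SyncProvider[StateT],
	existing *StateT,
) (*StateT, error) {
	for {
		states, errs := provider.Synchronize(ctx, existing)
		select {
		case state := <-states:
			return state, nil
		case err := <-errs:
			if err == nil {
				return existing, nil // bypass: sync handled elsewhere
			}
			// transient failure: loop and reattempt
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}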

View File

@ -0,0 +1,127 @@
package consensus
import (
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)
// TimeoutAggregator verifies and aggregates timeout states to build timeout
// certificates [TCs]. When enough timeout states are collected, it builds a
// TC and sends it to the EventLoop. TimeoutAggregator also detects protocol
// violations, including invalid timeouts, double timeouts, etc., and notifies
// a HotStuff consumer for slashing.
type TimeoutAggregator[VoteT models.Unique] interface {
lifecycle.Component
// AddTimeout verifies and aggregates a timeout state.
// This method can be called concurrently, timeouts will be queued and
// processed asynchronously.
AddTimeout(timeoutState *models.TimeoutState[VoteT])
// PruneUpToRank deletes all `TimeoutCollector`s _below_ the given rank, as
// well as related indices. We only retain and process `TimeoutCollector`s
// whose rank is equal to or larger than `lowestRetainedRank`. If
// `lowestRetainedRank` is smaller than the previous value, the previous value
// is kept and the method call is a NoOp. This value should be set to the
// latest active rank maintained by `Pacemaker`.
PruneUpToRank(lowestRetainedRank uint64)
}
// TimeoutCollector collects all timeout states for a specified rank. On the
// happy path, it generates a TimeoutCertificate when enough timeouts have been
// collected. The TimeoutCollector is a higher-level structure that orchestrates
// deduplication, caching and processing of timeouts, delegating those tasks to
// underlying modules (such as TimeoutProcessor). Implementations of
// TimeoutCollector must be concurrency safe.
type TimeoutCollector[VoteT models.Unique] interface {
// AddTimeout adds a Timeout State to the collector. When TSs from
// strictly more than 1/3 of consensus participants (measured by weight) were
// collected, the callback for partial TC will be triggered. After collecting
// TSs from a supermajority, a TC will be created and passed to the EventLoop.
// Expected error returns during normal operations:
// * timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for
// incompatible rank
// All other exceptions are symptoms of potential state corruption.
AddTimeout(timeoutState *models.TimeoutState[VoteT]) error
// Rank returns the rank that this instance is collecting timeouts for.
// This method is useful when adding the newly created timeout collector to
// timeout collectors map.
Rank() uint64
}
// TimeoutProcessor ingests Timeout States for a particular rank. It
// implements the algorithms for validating TSs, orchestrates their low-level
// aggregation and emits `OnPartialTimeoutCertificateCreated` and `OnTimeoutCertificateConstructedFromTimeouts`
// notifications. TimeoutProcessor cannot deduplicate TSs (this should be
// handled by the higher-level TimeoutCollector) and errors instead. Depending
// on their implementation, a TimeoutProcessor might drop timeouts or attempt to
// construct a TC.
type TimeoutProcessor[VoteT models.Unique] interface {
// Process performs processing of single timeout state. This function is safe
// to call from multiple goroutines. Expected error returns during normal
// operations:
// * timeoutcollector.ErrTimeoutForIncompatibleRank - submitted timeout for
// incompatible rank
// * models.InvalidTimeoutError - submitted invalid timeout (invalid
// structure or invalid signature)
// * models.DuplicatedSignerError if a timeout from the same signer was
// previously already added. It does _not necessarily_ imply that the
// timeout is invalid or the sender is equivocating.
// All other errors should be treated as exceptions.
Process(timeout *models.TimeoutState[VoteT]) error
}
// TimeoutCollectorFactory performs creation of TimeoutCollector for a given
// rank
type TimeoutCollectorFactory[VoteT models.Unique] interface {
// Create is a factory method to generate a TimeoutCollector for a given rank
// Expected error returns during normal operations:
// * models.ErrRankUnknown if the given rank is unknown
// All other errors should be treated as exceptions.
Create(rank uint64) (TimeoutCollector[VoteT], error)
}
// TimeoutProcessorFactory performs creation of TimeoutProcessor for a given
// rank
type TimeoutProcessorFactory[VoteT models.Unique] interface {
// Create is a factory method to generate a TimeoutProcessor for a given rank
// Expected error returns during normal operations:
// * models.ErrRankUnknown if the given rank is unknown
// All other errors should be treated as exceptions.
Create(rank uint64) (TimeoutProcessor[VoteT], error)
}
// TimeoutCollectors encapsulates the functionality to generate, store and prune
// `TimeoutCollector` instances (one per rank). Its main purpose is to provide a
// higher-level API to `TimeoutAggregator` for managing and interacting with the
// rank-specific `TimeoutCollector` instances. Implementations are concurrency
// safe.
type TimeoutCollectors[VoteT models.Unique] interface {
// GetOrCreateCollector retrieves the TimeoutCollector for the specified
// rank or creates one if none exists. When creating a timeout collector,
// the rank is used to query the consensus committee applicable to that
// rank.
// It returns:
// - (collector, true, nil) if no collector can be found by the rank, and a
// new collector was created.
// - (collector, false, nil) if the collector can be found by the rank.
// - (nil, false, error) if running into any exception creating the timeout
// collector.
// Expected error returns during normal operations:
// * models.BelowPrunedThresholdError if rank is below the pruning threshold
// * models.ErrRankUnknown if the rank is not yet pruned but is otherwise
// unknown
GetOrCreateCollector(rank uint64) (
collector TimeoutCollector[VoteT],
created bool,
err error,
)
// PruneUpToRank prunes the timeout collectors with ranks _below_ the given
// value, i.e. we only retain and process timeout collectors, whose ranks are
// equal or larger than `lowestRetainedRank`. If `lowestRetainedRank` is
// smaller than the previous value, the previous value is kept and the method
// call is a NoOp.
PruneUpToRank(lowestRetainedRank uint64)
}
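// Example (editor's sketch): the routing step implied by the docs above, as a
// TimeoutAggregator might perform it. Only documented methods and error
// expectations are used; the surrounding error policy is illustrative.
func routeTimeout[VoteT models.Unique](
	collectors TimeoutCollectors[VoteT],
	rank uint64,
	timeoutState *models.TimeoutState[VoteT],
) error {
	collector, _, err := collectors.GetOrCreateCollector(rank)
	if err != nil {
		// Per the docs, models.BelowPrunedThresholdError and
		// models.ErrRankUnknown are expected here; anything else is fatal.
		return err
	}
	return collector.AddTimeout(timeoutState)
}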

View File

@ -0,0 +1,102 @@
package consensus
import (
"encoding/hex"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TraceLogger defines a simple tracing interface
type TraceLogger interface {
Trace(message string, params ...LogParam)
Error(message string, err error, params ...LogParam)
With(params ...LogParam) TraceLogger
}
type LogParam struct {
key string
value any
kind string
}
func StringParam(key string, value string) LogParam {
return LogParam{
key: key,
value: value,
kind: "string",
}
}
func Uint64Param(key string, value uint64) LogParam {
return LogParam{
key: key,
value: value,
kind: "uint64",
}
}
func Uint32Param(key string, value uint32) LogParam {
return LogParam{
key: key,
value: value,
kind: "uint32",
}
}
func Int64Param(key string, value int64) LogParam {
return LogParam{
key: key,
value: value,
kind: "int64",
}
}
func Int32Param(key string, value int32) LogParam {
return LogParam{
key: key,
value: value,
kind: "int32",
}
}
func IdentityParam(key string, value models.Identity) LogParam {
return LogParam{
key: key,
value: hex.EncodeToString([]byte(value)),
kind: "string",
}
}
func HexParam(key string, value []byte) LogParam {
return LogParam{
key: key,
value: hex.EncodeToString(value),
kind: "string",
}
}
func TimeParam(key string, value time.Time) LogParam {
return LogParam{
key: key,
value: value,
kind: "time",
}
}
func (l LogParam) GetKey() string {
return l.key
}
func (l LogParam) GetValue() any {
return l.value
}
func (l LogParam) GetKind() string {
return l.kind
}
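// Example (editor's sketch): binding common fields once via With, then
// tracing with them. The tracer variable and field names are hypothetical.
func traceRankEntry(tracer TraceLogger, rank uint64, leader models.Identity) {
	log := tracer.With(
		Uint64Param("current_rank", rank),
		IdentityParam("leader_id", leader),
	)
	log.Trace("entered new rank")
}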
// nilTracer is a no-op implementation of TraceLogger.
type nilTracer struct{}
func (n nilTracer) Trace(message string, params ...LogParam) {}
func (n nilTracer) Error(message string, err error, params ...LogParam) {}
func (n nilTracer) With(params ...LogParam) TraceLogger { return n }

View File

@ -0,0 +1,32 @@
package consensus
import (
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Validator provides functions to validate QuorumCertificate, proposals and
// votes.
type Validator[StateT models.Unique, VoteT models.Unique] interface {
// ValidateQuorumCertificate checks the validity of a QuorumCertificate.
// During normal operations, the following error returns are expected:
// * models.InvalidQuorumCertificateError if the QuorumCertificate is invalid
ValidateQuorumCertificate(qc models.QuorumCertificate) error
// ValidateTimeoutCertificate checks the validity of a TimeoutCertificate.
// During normal operations, the following error returns are expected:
// * models.InvalidTimeoutCertificateError if the TimeoutCertificate is
// invalid
ValidateTimeoutCertificate(tc models.TimeoutCertificate) error
// ValidateProposal checks the validity of a proposal.
// During normal operations, the following error returns are expected:
// * models.InvalidProposalError if the state is invalid
ValidateProposal(proposal *models.SignedProposal[StateT, VoteT]) error
// ValidateVote checks the validity of a vote.
// Returns the full entity for the voter. During normal operations,
// the following errors are expected:
// * models.InvalidVoteError for invalid votes
ValidateVote(vote *VoteT) (*models.WeightedIdentity, error)
}

View File

@ -0,0 +1,45 @@
package consensus
import "source.quilibrium.com/quilibrium/monorepo/consensus/models"
// Verifier is the component responsible for the cryptographic integrity of
// votes, proposals and QCs against the state they are signing.
type Verifier[VoteT models.Unique] interface {
// VerifyVote checks the cryptographic validity of a vote's `SigData` w.r.t.
// the rank and stateID. It is the responsibility of the calling code to
// ensure that `voter` is authorized to vote.
// Return values:
// * nil if `sigData` is cryptographically valid
// * models.InvalidFormatError if the signature has an incompatible format.
// * models.ErrInvalidSignature if the signature is invalid
// * unexpected errors should be treated as symptoms of bugs or uncovered
// edge cases in the logic (i.e. as fatal)
VerifyVote(vote *VoteT) error
// VerifyQuorumCertificate checks the cryptographic validity of a QC's `SigData` w.r.t. the
// given rank and stateID. It is the responsibility of the calling code to
// ensure that all `signers` are authorized, without duplicates.
// Return values:
// * nil if `sigData` is cryptographically valid
// * models.InvalidFormatError if `sigData` has an incompatible format
// * models.InsufficientSignaturesError if `signers` is empty.
// Depending on the order of checks in the higher-level logic this error
// might be an indicator of an external byzantine input or an internal bug.
// * models.ErrInvalidSignature if a signature is invalid
// * unexpected errors should be treated as symptoms of bugs or uncovered
// edge cases in the logic (i.e. as fatal)
VerifyQuorumCertificate(quorumCertificate models.QuorumCertificate) error
// VerifyTimeoutCertificate checks cryptographic validity of the TC's
// `sigData` w.r.t. the given rank. It is the responsibility of the calling
// code to ensure that all `signers` are authorized, without duplicates.
// Return values:
// * nil if `sigData` is cryptographically valid
// * models.InsufficientSignaturesError if `signers` is empty.
// * models.InvalidFormatError if `signers`/`highQCRanks` have differing
// lengths
// * models.ErrInvalidSignature if a signature is invalid
// * unexpected errors should be treated as symptoms of bugs or uncovered
// edge cases in the logic (i.e. as fatal)
VerifyTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error
}

View File

@ -0,0 +1,43 @@
package consensus
import (
"context"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VotingProvider handles voting logic by deferring decisions, collection, and
// state finalization to an outside implementation.
type VotingProvider[
StateT models.Unique,
VoteT models.Unique,
PeerIDT models.Unique,
] interface {
// SignVote signs a proposal, produces an output vote for aggregation and
// broadcasting.
SignVote(
ctx context.Context,
state *models.State[StateT],
) (*VoteT, error)
// SignTimeoutVote signs a timeout for the given rank, producing an output
// vote for timeout aggregation and broadcasting.
SignTimeoutVote(
ctx context.Context,
filter []byte,
currentRank uint64,
newestQuorumCertificateRank uint64,
) (*VoteT, error)
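// FinalizeQuorumCertificate produces a quorum certificate for the given
// state from the aggregated signature.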
FinalizeQuorumCertificate(
ctx context.Context,
state *models.State[StateT],
aggregatedSignature models.AggregatedSignature,
) (models.QuorumCertificate, error)
// FinalizeTimeout produces a timeout certificate from the aggregated
// signature.
FinalizeTimeout(
ctx context.Context,
rank uint64,
latestQuorumCertificate models.QuorumCertificate,
latestQuorumCertificateRanks []uint64,
aggregatedSignature models.AggregatedSignature,
) (models.TimeoutCertificate, error)
}

View File

@ -0,0 +1,10 @@
package consensus
// WeightProvider defines the methods for handling weighted differentiation
// of voters, such as seniority or stake.
type WeightProvider interface {
// GetWeightForBitmask returns the total weight of the given bitmask for the
// prover set under the filter. Bitmask is expected to be in ascending ring
// order.
GetWeightForBitmask(filter []byte, bitmask []byte) uint64
}
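// Example (editor's sketch): an illustrative supermajority check on top of
// GetWeightForBitmask. The all-set bitmask and the strictly-more-than-2/3
// threshold are assumptions, not part of the interface contract.
func hasSupermajority(
	wp WeightProvider,
	filter []byte,
	voteBitmask []byte,
	fullBitmask []byte, // bitmask with every prover set
) bool {
	totalWeight := wp.GetWeightForBitmask(filter, fullBitmask)
	collected := wp.GetWeightForBitmask(filter, voteBitmask)
	return collected*3 > totalWeight*2 // strictly more than 2/3
}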

View File

@ -0,0 +1,50 @@
package counters
import "sync/atomic"
// StrictMonotonicCounter is a helper struct which implements a strict
// monotonic counter. It doesn't allow setting a value which is lower than or
// equal to the already stored one. The counter is implemented solely with
// non-blocking atomic operations for concurrency safety.
type StrictMonotonicCounter struct {
atomicCounter uint64
}
// NewMonotonicCounter creates a new counter with the given initial value.
func NewMonotonicCounter(initialValue uint64) StrictMonotonicCounter {
return StrictMonotonicCounter{
atomicCounter: initialValue,
}
}
// Set updates the value of the counter if and only if the new value is
// strictly larger than the value already stored. Returns true if the update
// was successful, or false if the stored value is larger than or equal to
// newValue.
func (c *StrictMonotonicCounter) Set(newValue uint64) bool {
for {
oldValue := c.Value()
if newValue <= oldValue {
return false
}
if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) {
return true
}
}
}
// Value returns the value stored in the atomic variable.
func (c *StrictMonotonicCounter) Value() uint64 {
return atomic.LoadUint64(&c.atomicCounter)
}
// Increment atomically increments the counter and returns the updated value.
func (c *StrictMonotonicCounter) Increment() uint64 {
for {
oldValue := c.Value()
newValue := oldValue + 1
if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) {
return newValue
}
}
}
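// Example (editor's sketch): the strict-monotonic contract in action.
func exampleCounterUsage() {
	c := NewMonotonicCounter(5)
	_ = c.Set(7)      // true: 7 > 5
	_ = c.Set(7)      // false: equal values are rejected
	_ = c.Set(6)      // false: lower values are rejected
	_ = c.Increment() // counter becomes 8
	_ = c.Value()     // 8
}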

View File

@ -0,0 +1,825 @@
package eventhandler
import (
"context"
"errors"
"fmt"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// EventHandler is the main handler for individual events that trigger state
// transition. It exposes API to handle one event at a time synchronously.
// EventHandler is *not concurrency safe*. Please use the EventLoop to ensure
// that only a single go-routine executes the EventHandler's algorithms.
// EventHandler is implemented in an event-driven way: it reacts to incoming
// events and performs certain actions. It doesn't perform any actions on its
// own. EventHandler has three main responsibilities: vote, propose, timeout.
// There are specific scenarios that lead to each of those actions.
// - create vote: voting logic is triggered by OnReceiveProposal; after
// receiving a proposal we have all required information to create a valid
// vote. The compliance engine makes sure that we receive proposals whose
// parents are known. Creating a vote can be triggered ONLY by receiving a
// proposal.
// - create timeout: creating models.TimeoutState is triggered by
// OnLocalTimeout, after reaching the deadline for the current rank.
// EventHandler gets notified about it and has to create a
// models.TimeoutState and broadcast it to other replicas. Creating a TO can
// be triggered by reaching the rank deadline or as part of Bracha broadcast
// when a superminority of replicas have contributed to TC creation and
// created a partial TC.
// - create a proposal: proposing logic is more complicated. Creating a
// proposal is triggered by the EventHandler receiving a QC or TC that
// induces a rank change to a rank where the replica is primary. As an edge
// case, the EventHandler can receive a QC or TC that triggers the rank
// change, but we can't create a proposal in case we are missing the parent
// state the newest QC refers to. In case we already have the QC, but are
// still missing the respective parent, OnReceiveProposal can trigger the
// proposing logic as well, but only when receiving a proposal for a rank
// lower than the active rank. To summarize, to make a valid proposal for
// rank N we need to have a QC or TC for N-1 and know the proposal with
// stateID NewestQC.Identifier.
//
// Not concurrency safe.
type EventHandler[
StateT models.Unique,
VoteT models.Unique,
PeerIDT models.Unique,
CollectedT models.Unique,
] struct {
tracer consensus.TraceLogger
paceMaker consensus.Pacemaker
stateProducer consensus.StateProducer[StateT, VoteT]
forks consensus.Forks[StateT]
store consensus.ConsensusStore[VoteT]
committee consensus.Replicas
safetyRules consensus.SafetyRules[StateT, VoteT]
notifier consensus.Consumer[StateT, VoteT]
}
var _ consensus.EventHandler[*nilUnique, *nilUnique] = (*EventHandler[
*nilUnique, *nilUnique, *nilUnique, *nilUnique,
])(nil)
// NewEventHandler creates an EventHandler instance with initial components.
func NewEventHandler[
StateT models.Unique,
VoteT models.Unique,
PeerIDT models.Unique,
CollectedT models.Unique,
](
paceMaker consensus.Pacemaker,
stateProducer consensus.StateProducer[StateT, VoteT],
forks consensus.Forks[StateT],
store consensus.ConsensusStore[VoteT],
committee consensus.Replicas,
safetyRules consensus.SafetyRules[StateT, VoteT],
notifier consensus.Consumer[StateT, VoteT],
tracer consensus.TraceLogger,
) (*EventHandler[StateT, VoteT, PeerIDT, CollectedT], error) {
e := &EventHandler[StateT, VoteT, PeerIDT, CollectedT]{
paceMaker: paceMaker,
stateProducer: stateProducer,
forks: forks,
store: store,
safetyRules: safetyRules,
committee: committee,
notifier: notifier,
tracer: tracer,
}
return e, nil
}
// OnReceiveQuorumCertificate processes a valid QC constructed by the internal
// vote aggregator or discovered in a TimeoutState. All inputs should be
// validated before being fed into this function, assuming trusted data. No
// errors are expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) OnReceiveQuorumCertificate(qc models.QuorumCertificate) error {
curRank := e.paceMaker.CurrentRank()
e.tracer.Trace(
"received QC",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("qc_rank", qc.GetRank()),
consensus.IdentityParam("state_id", qc.Identity()),
)
e.notifier.OnReceiveQuorumCertificate(curRank, qc)
defer e.notifier.OnEventProcessed()
newRankEvent, err := e.paceMaker.ReceiveQuorumCertificate(qc)
if err != nil {
return fmt.Errorf("could not process QC: %w", err)
}
if newRankEvent == nil {
e.tracer.Trace("QC didn't trigger rank change, nothing to do")
return nil
}
// current rank has changed, go to new rank
e.tracer.Trace("QC triggered rank change, starting new rank now")
return e.proposeForNewRankIfPrimary()
}
// OnReceiveTimeoutCertificate processes a valid TC constructed by the
// internal timeout aggregator, discovered in a TimeoutState or broadcast over
// the network. All inputs should be validated before being fed into this
// function, assuming trusted data. No errors are expected during normal
// operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) OnReceiveTimeoutCertificate(tc models.TimeoutCertificate) error {
curRank := e.paceMaker.CurrentRank()
e.tracer.Trace(
"received TC",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("tc_rank", tc.GetRank()),
consensus.Uint64Param(
"tc_newest_qc_rank",
tc.GetLatestQuorumCert().GetRank(),
),
consensus.IdentityParam(
"tc_newest_qc_state_id",
tc.GetLatestQuorumCert().Identity(),
),
)
e.notifier.OnReceiveTimeoutCertificate(curRank, tc)
defer e.notifier.OnEventProcessed()
newRankEvent, err := e.paceMaker.ReceiveTimeoutCertificate(tc)
if err != nil {
return fmt.Errorf("could not process TC for rank %d: %w", tc.GetRank(), err)
}
if newRankEvent == nil {
e.tracer.Trace("TC didn't trigger rank change, nothing to do",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("tc_rank", tc.GetRank()),
consensus.Uint64Param(
"tc_newest_qc_rank",
tc.GetLatestQuorumCert().GetRank(),
),
consensus.IdentityParam(
"tc_newest_qc_state_id",
tc.GetLatestQuorumCert().Identity(),
))
return nil
}
// current rank has changed, go to new rank
e.tracer.Trace("TC triggered rank change, starting new rank now",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("tc_rank", tc.GetRank()),
consensus.Uint64Param(
"tc_newest_qc_rank",
tc.GetLatestQuorumCert().GetRank(),
),
consensus.IdentityParam(
"tc_newest_qc_state_id",
tc.GetLatestQuorumCert().Identity(),
))
return e.proposeForNewRankIfPrimary()
}
// OnReceiveProposal processes a state proposal received from another HotStuff
// consensus participant.
// All inputs should be validated before being fed into this function,
// assuming trusted data. No errors are expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error {
state := proposal.State
curRank := e.paceMaker.CurrentRank()
e.tracer.Trace(
"proposal received from compliance engine",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("state_rank", state.Rank),
consensus.IdentityParam("state_id", state.Identifier),
consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()),
consensus.IdentityParam("proposer_id", state.ProposerID),
)
e.notifier.OnReceiveProposal(curRank, proposal)
defer e.notifier.OnEventProcessed()
// ignore stale proposals
if state.Rank < e.forks.FinalizedRank() {
e.tracer.Trace(
"stale proposal",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("state_rank", state.Rank),
consensus.IdentityParam("state_id", state.Identifier),
consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()),
consensus.IdentityParam("proposer_id", state.ProposerID),
)
return nil
}
// store the state.
err := e.forks.AddValidatedState(proposal.State)
if err != nil {
return fmt.Errorf(
"cannot add proposal to forks (%x): %w",
state.Identifier,
err,
)
}
_, err = e.paceMaker.ReceiveQuorumCertificate(
proposal.State.ParentQuorumCertificate,
)
if err != nil {
return fmt.Errorf(
"could not process QC for state %x: %w",
state.Identifier,
err,
)
}
_, err = e.paceMaker.ReceiveTimeoutCertificate(
proposal.PreviousRankTimeoutCertificate,
)
if err != nil {
return fmt.Errorf(
"could not process TC for state %x: %w",
state.Identifier,
err,
)
}
// if the state is for the current rank, then try voting for this state
err = e.processStateForCurrentRank(proposal)
if err != nil {
return fmt.Errorf("failed processing current state: %w", err)
}
e.tracer.Trace(
"proposal processed from compliance engine",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("state_rank", state.Rank),
consensus.IdentityParam("state_id", state.Identifier),
consensus.Uint64Param("qc_rank", state.ParentQuorumCertificate.GetRank()),
consensus.IdentityParam("proposer_id", state.ProposerID),
)
// nothing to do if this proposal is for current rank
if proposal.State.Rank == e.paceMaker.CurrentRank() {
return nil
}
return e.proposeForNewRankIfPrimary()
}
// TimeoutChannel returns the channel that signals a timeout while waiting
// for a state or votes for the current rank.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) TimeoutChannel() <-chan time.Time {
return e.paceMaker.TimeoutCh()
}
// OnLocalTimeout handles a local timeout event by creating a
// models.TimeoutState and broadcasting it. No errors are expected during normal
// operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) OnLocalTimeout() error {
curRank := e.paceMaker.CurrentRank()
e.tracer.Trace(
"timeout received from event loop",
consensus.Uint64Param("current_rank", curRank),
)
e.notifier.OnLocalTimeout(curRank)
defer e.notifier.OnEventProcessed()
err := e.broadcastTimeoutStateIfAuthorized()
if err != nil {
return fmt.Errorf(
"unexpected exception while processing timeout in rank %d: %w",
curRank,
err,
)
}
return nil
}
// OnPartialTimeoutCertificateCreated handles a notification produced by the
// internal timeout aggregator. If the notification is for the current rank, a
// corresponding models.TimeoutState is broadcast to the consensus committee.
// No errors are expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) OnPartialTimeoutCertificateCreated(
partialTC *consensus.PartialTimeoutCertificateCreated,
) error {
curRank := e.paceMaker.CurrentRank()
previousRankTimeoutCert := partialTC.PriorRankTimeoutCertificate
e.tracer.Trace(
"constructed partial TC",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param(
"qc_rank",
partialTC.NewestQuorumCertificate.GetRank(),
),
)
e.notifier.OnPartialTimeoutCertificate(curRank, partialTC)
defer e.notifier.OnEventProcessed()
// process QC, this might trigger rank change
_, err := e.paceMaker.ReceiveQuorumCertificate(
partialTC.NewestQuorumCertificate,
)
if err != nil {
return fmt.Errorf("could not process newest QC: %w", err)
}
// process TC, this might trigger rank change
_, err = e.paceMaker.ReceiveTimeoutCertificate(previousRankTimeoutCert)
if err != nil {
return fmt.Errorf(
"could not process TC for rank %d: %w",
previousRankTimeoutCert.GetRank(),
err,
)
}
// NOTE: in other cases when we have observed a rank change we will trigger
// the proposing logic; this is the desired logic for handling proposals, QCs
// and TCs. However, observing a partial TC means that a superminority have
// timed out and there was at least one honest replica in that set. Honest
// replicas will never vote after timing out for the current rank, meaning we
// won't be able to collect a supermajority of votes for a proposal made
// after observing a partial TC.
// By definition, we are allowed to produce a timeout state if we have
// received a partial TC for the current rank.
if e.paceMaker.CurrentRank() != partialTC.Rank {
return nil
}
e.tracer.Trace(
"partial TC generated for current rank, broadcasting timeout",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param(
"qc_rank",
partialTC.NewestQuorumCertificate.GetRank(),
),
)
err = e.broadcastTimeoutStateIfAuthorized()
if err != nil {
return fmt.Errorf(
"unexpected exception while processing partial TC in rank %d: %w",
partialTC.Rank,
err,
)
}
return nil
}
// Start starts the event handler. No errors are expected during normal
// operation. CAUTION: EventHandler is not concurrency safe. The Start method
// must be executed by the same goroutine that also calls the other business
// logic methods, or concurrency safety has to be implemented externally.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) Start(ctx context.Context) error {
e.notifier.OnStart(e.paceMaker.CurrentRank())
defer e.notifier.OnEventProcessed()
e.paceMaker.Start(ctx)
err := e.proposeForNewRankIfPrimary()
if err != nil {
return fmt.Errorf("could not start new rank: %w", err)
}
return nil
}
// broadcastTimeoutStateIfAuthorized attempts to generate a
// models.TimeoutState, adds it to `timeoutAggregator` and broadcasts it to
// the consensus committee. We check whether this node, at the current rank,
// is part of the consensus committee; otherwise, this method is functionally
// a no-op. For example, right after a rank switchover a consensus node might
// still be online but not part of the _active_ consensus committee anymore.
// Consequently, it should not broadcast timeouts anymore. No errors are
// expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) broadcastTimeoutStateIfAuthorized() error {
curRank := e.paceMaker.CurrentRank()
newestQC := e.paceMaker.LatestQuorumCertificate()
previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate()
if newestQC.GetRank()+1 == curRank {
// in case the last rank has ended with both a QC and a TC, make sure that
// only the QC is included, otherwise such a timeout is invalid. This case is
// possible if the TC has included a QC with the same rank as the TC itself,
// meaning that newestQC.Rank == previousRankTimeoutCert.Rank
previousRankTimeoutCert = nil
}
timeout, err := e.safetyRules.ProduceTimeout(
curRank,
newestQC,
previousRankTimeoutCert,
)
if err != nil {
if models.IsNoTimeoutError(err) {
e.tracer.Error(
"not generating timeout as this node is not part of the active committee",
err,
consensus.Uint64Param("current_rank", curRank),
)
return nil
}
return fmt.Errorf("could not produce timeout: %w", err)
}
// raise a notification to broadcast timeout
e.notifier.OnOwnTimeout(timeout)
e.tracer.Trace(
"broadcast TimeoutState done",
consensus.Uint64Param("current_rank", curRank),
)
return nil
}
// proposeForNewRankIfPrimary will only be called when we may be able to propose
// a state, after processing a new event.
// - after entering a new rank as a result of processing a QC or TC, then we
// may propose for the newly entered rank
// - after receiving a proposal (but not changing rank), if that proposal is
// referenced by our highest known QC, and the proposal was previously
// unknown, then we can propose a state in the current rank
//
// Enforced INVARIANTS:
// - There will be at most one `OnOwnProposal` notification emitted for ranks
// where this node is the leader, and none if another node is the leader.
// This holds irrespective of restarts. Formally, this prevents proposal
// equivocation.
//
// It reads the current rank, and generates a proposal if we are the leader.
// No errors are expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) proposeForNewRankIfPrimary() error {
start := time.Now() // track the start time
curRank := e.paceMaker.CurrentRank()
e.tracer.Trace(
"deciding to propose",
consensus.Uint64Param("current_rank", curRank),
consensus.IdentityParam("self", e.committee.Self()),
)
currentLeader, err := e.committee.LeaderForRank(curRank)
if err != nil {
return fmt.Errorf(
"failed to determine primary for new rank %d: %w",
curRank,
err,
)
}
finalizedRank := e.forks.FinalizedRank()
e.notifier.OnCurrentRankDetails(curRank, finalizedRank, currentLeader)
// check that I am the primary for this rank
if e.committee.Self() != currentLeader {
e.tracer.Trace(
"not current leader, waiting",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("finalized_rank", finalizedRank),
consensus.IdentityParam("leader_id", currentLeader),
)
return nil
}
// attempt to generate proposal:
newestQC := e.paceMaker.LatestQuorumCertificate()
previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate()
_, found := e.forks.GetState(newestQC.Identity())
if !found {
// we don't know anything about the state referenced by our newest QC; in
// this case we can't create a valid proposal since we can't guarantee the
// validity of the state payload.
e.tracer.Trace(
"haven't synced the latest state yet; can't propose",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("finalized_rank", finalizedRank),
consensus.IdentityParam("leader_id", currentLeader),
)
return nil
}
e.tracer.Trace(
"generating proposal as leader",
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("finalized_rank", finalizedRank),
consensus.IdentityParam("leader_id", currentLeader),
)
// Sanity checks to make sure that resulting proposal is valid:
// In its proposal, the leader for rank N needs to present evidence that it
// has legitimately entered rank N. As evidence, we include a QC or TC for
// rank N-1, which should always be available as the PaceMaker advances to
// rank N only after observing a QC or TC from rank N-1. Moreover, QC and TC
// are always processed together. As EventHandler is strictly single-threaded
// without reentrancy, we must have a QC or TC for the prior rank (curRank-1).
// Failing one of these sanity checks is a symptom of state corruption or a
// severe implementation bug.
if newestQC.GetRank()+1 != curRank {
if previousRankTimeoutCert == nil {
return fmt.Errorf("possible state corruption, expected previousRankTimeoutCert to be not nil")
}
if previousRankTimeoutCert.GetRank()+1 != curRank {
return fmt.Errorf(
"possible state corruption, don't have QC(rank=%d) and TC(rank=%d) for previous rank(currentRank=%d)",
newestQC.GetRank(),
previousRankTimeoutCert.GetRank(),
curRank,
)
}
} else {
// In case the last rank has ended with both a QC and a TC, make sure that
// only the QC is included, otherwise such a proposal is invalid. This case is
// possible if the TC has included a QC with the same rank as the TC itself,
// meaning that newestQC.Rank == previousRankTimeoutCert.Rank
previousRankTimeoutCert = nil
}
// Construct Own SignedProposal
// CAUTION, design constraints:
// (i) We cannot process our own proposal within the `EventHandler` right
// away.
// (ii) We cannot add our own proposal to Forks here right away.
// (iii) Metrics for the PaceMaker/CruiseControl assume that the EventHandler
// is the only caller of `TargetPublicationTime`. Technically,
// `TargetPublicationTime` records the publication delay relative to
// its _latest_ call.
//
// To satisfy all constraints, we construct the proposal here and query
// (once!) its `TargetPublicationTime`. Though, we do _not_ process our own
// states right away and instead ingest them into the EventHandler the same
// way as proposals from other consensus participants. Specifically, on the
// path through the HotStuff state machine leading to state construction, the
// node's own proposal is largely ephemeral. The proposal is handed to the
// `MessageHub` (via the `OnOwnProposal` notification including the
// `TargetPublicationTime`). The `MessageHub` waits until
// `TargetPublicationTime` and only then broadcast the proposal and puts it
// into the EventLoop's queue for inbound states. This is exactly the same way
// as proposals from other nodes are ingested by the `EventHandler`, except
// that we are skipping the ComplianceEngine (assuming that our own proposals
// are protocol-compliant).
//
// Context:
// • On constraint (i): We want to support consensus committees only
// consisting of a *single* node. If the EventHandler internally processed
// the state right away via a direct method call, the call-stack would be
// ever-growing and the node would crash eventually (we experienced this
// with a very early HotStuff implementation). Specifically, if we wanted
// to process the state directly without taking a detour through the
// EventLoop's inbound queue, we would call `OnReceiveProposal` here. The
// function `OnReceiveProposal` would then end up calling
// `proposeForNewRankIfPrimary` (this function) to generate the next
// proposal, which again would result in calling `OnReceiveProposal` and so
// on so forth until the call stack or memory limit is reached and the node
// crashes. This is only a problem for consensus committees of size 1.
// • On constraint (ii): When adding a proposal to Forks, Forks emits a
// `StateIncorporatedEvent` notification, which is observed by Cruise
// Control and would change its state. However, note that Cruise Control
// is trying to estimate the point in time when _other_ nodes are observing
// the proposal. The time when we broadcast the proposal (i.e.
// `TargetPublicationTime`) is a reasonably good estimator, but *not* the
// time the proposer constructed the state (because there is potentially
// still a significant wait until `TargetPublicationTime`).
//
// The current approach is for a node to process its own proposals at the same
// time and through the same code path as proposals from other nodes. This
// satisfies constraints (i) and (ii) and generates very strong consistency,
// from a software design perspective.
// Just hypothetically, if we changed Cruise Control to be notified about
// own state proposals _only_ when they are broadcast (satisfying constraint
// (ii) without relying on the EventHandler), then we could add a proposal to
// Forks here right away. Nevertheless, the restriction remains that we cannot
// process that proposal right away within the EventHandler and instead need
// to put it into the EventLoop's inbound queue to support consensus
// committees of size 1.
stateProposal, err := e.stateProducer.MakeStateProposal(
curRank,
newestQC,
previousRankTimeoutCert,
)
if err != nil {
if models.IsNoVoteError(err) {
e.tracer.Error(
"aborting state proposal to prevent equivocation (likely re-entered proposal logic due to crash)",
err,
consensus.Uint64Param("current_rank", curRank),
consensus.Uint64Param("finalized_rank", finalizedRank),
consensus.IdentityParam("leader_id", currentLeader),
)
return nil
}
return fmt.Errorf(
"can not make state proposal for curRank %d: %w",
curRank,
err,
)
}
targetPublicationTime := e.paceMaker.TargetPublicationTime(
stateProposal.State.Rank,
start,
stateProposal.State.ParentQuorumCertificate.Identity(),
) // determine target publication time
e.tracer.Trace(
"forwarding proposal to communicator for broadcasting",
consensus.Uint64Param("state_rank", stateProposal.State.Rank),
consensus.TimeParam("target_publication", targetPublicationTime),
consensus.IdentityParam("state_id", stateProposal.State.Identifier),
consensus.Uint64Param("parent_rank", newestQC.GetRank()),
consensus.IdentityParam("parent_id", newestQC.Identity()),
consensus.IdentityParam("signer", stateProposal.State.ProposerID),
)
// emit notification with own proposal (also triggers broadcast)
e.notifier.OnOwnProposal(stateProposal, targetPublicationTime)
return nil
}
// processStateForCurrentRank processes the state for the current rank.
// It is called AFTER the state has been stored or found in Forks.
// It checks whether to vote for this state.
// No errors are expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) processStateForCurrentRank(
proposal *models.SignedProposal[StateT, VoteT],
) error {
// sanity check that state is really for the current rank:
curRank := e.paceMaker.CurrentRank()
state := proposal.State
if state.Rank != curRank {
// ignore outdated proposals in case we have moved forward
return nil
}
// leader (node ID) for next rank
nextLeader, err := e.committee.LeaderForRank(curRank + 1)
if errors.Is(err, models.ErrRankUnknown) {
// We are attempting to process a state in an unknown rank.
// This should never happen, because:
// * the compliance layer ensures proposals are passed to the event loop
// strictly after their parent
// * the protocol state ensures that, before incorporating the first state
// of a rank R, either R is known or we have triggered fallback mode - in
// either case the current rank is known
return fmt.Errorf("attempting to process a state for unknown rank")
}
if err != nil {
return fmt.Errorf(
"failed to determine primary for next rank %d: %w",
curRank+1,
err,
)
}
// safetyRules performs all the checks to decide whether to vote for this
// state or not.
err = e.ownVote(proposal, curRank, nextLeader)
if err != nil {
return fmt.Errorf("unexpected error in voting logic: %w", err)
}
return nil
}
// ownVote generates and forwards the own vote, if we decide to vote.
// Any errors are potential symptoms of uncovered edge cases or corrupted
// internal state (fatal). No errors are expected during normal operation.
func (e *EventHandler[
StateT,
VoteT,
PeerIDT,
CollectedT,
]) ownVote(
proposal *models.SignedProposal[StateT, VoteT],
curRank uint64,
nextLeader models.Identity,
) error {
_, found := e.forks.GetState(
proposal.State.ParentQuorumCertificate.Identity(),
)
if !found {
// we don't have the parent for this proposal, so we can't vote since we
// can't guarantee the validity of the proposal's payload. Strictly speaking,
// this shouldn't ever happen because the compliance engine makes sure that
// we receive proposals with valid parents.
return fmt.Errorf(
"won't vote for proposal, no parent state for this proposal",
)
}
// safetyRules performs all the checks to decide whether to vote for this
// state or not.
ownVote, err := e.safetyRules.ProduceVote(proposal, curRank)
if err != nil {
if !models.IsNoVoteError(err) {
// unknown error, exit the event loop
return fmt.Errorf("could not produce vote: %w", err)
}
e.tracer.Trace(
"should not vote for this state",
consensus.Uint64Param("state_rank", proposal.State.Rank),
consensus.IdentityParam("state_id", proposal.State.Identifier),
consensus.Uint64Param(
"parent_rank",
proposal.State.ParentQuorumCertificate.GetRank(),
),
consensus.IdentityParam(
"parent_id",
proposal.State.ParentQuorumCertificate.Identity(),
),
consensus.IdentityParam("signer", proposal.State.ProposerID[:]),
)
return nil
}
e.tracer.Trace(
"forwarding vote to compliance engine",
consensus.Uint64Param("state_rank", proposal.State.Rank),
consensus.IdentityParam("state_id", proposal.State.Identifier),
consensus.Uint64Param(
"parent_rank",
proposal.State.ParentQuorumCertificate.GetRank(),
),
consensus.IdentityParam(
"parent_id",
proposal.State.ParentQuorumCertificate.Identity(),
),
consensus.IdentityParam("signer", proposal.State.ProposerID[:]),
)
e.notifier.OnOwnVote(ownVote, nextLeader)
return nil
}
// nilUnique is used to satisfy generic arguments in a compile-time type
// assertion check.
type nilUnique struct{}
// GetSignature implements models.Unique.
func (n *nilUnique) GetSignature() []byte {
panic("unimplemented")
}
// GetTimestamp implements models.Unique.
func (n *nilUnique) GetTimestamp() uint64 {
panic("unimplemented")
}
// Source implements models.Unique.
func (n *nilUnique) Source() models.Identity {
panic("unimplemented")
}
// Clone implements models.Unique.
func (n *nilUnique) Clone() models.Unique {
panic("unimplemented")
}
// GetRank implements models.Unique.
func (n *nilUnique) GetRank() uint64 {
panic("unimplemented")
}
// Identity implements models.Unique.
func (n *nilUnique) Identity() models.Identity {
panic("unimplemented")
}
var _ models.Unique = (*nilUnique)(nil)

File diff suppressed because it is too large

View File

@ -0,0 +1,382 @@
package eventloop
import (
"context"
"fmt"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/consensus/tracker"
"source.quilibrium.com/quilibrium/monorepo/lifecycle"
)
// queuedProposal is a helper structure used to transmit a proposal over a
// channel. It carries an insertionTime that is used to measure how long we
// have waited between queueing the proposal and it actually being processed
// by the `EventHandler`.
type queuedProposal[StateT models.Unique, VoteT models.Unique] struct {
proposal *models.SignedProposal[StateT, VoteT]
insertionTime time.Time
}
// EventLoop buffers all incoming events to the hotstuff EventHandler, and feeds
// EventHandler one event at a time.
type EventLoop[StateT models.Unique, VoteT models.Unique] struct {
*lifecycle.ComponentManager
eventHandler consensus.EventHandler[StateT, VoteT]
proposals chan queuedProposal[StateT, VoteT]
newestSubmittedTimeoutCertificate *tracker.NewestTCTracker
newestSubmittedQc *tracker.NewestQCTracker
newestSubmittedPartialTimeoutCertificate *tracker.NewestPartialTimeoutCertificateTracker
tcSubmittedNotifier chan struct{}
qcSubmittedNotifier chan struct{}
partialTimeoutCertificateCreatedNotifier chan struct{}
startTime time.Time
tracer consensus.TraceLogger
}
var _ consensus.EventLoop[*nilUnique, *nilUnique] = (*EventLoop[*nilUnique, *nilUnique])(nil)
// NewEventLoop creates an instance of EventLoop.
func NewEventLoop[StateT models.Unique, VoteT models.Unique](
tracer consensus.TraceLogger,
eventHandler consensus.EventHandler[StateT, VoteT],
startTime time.Time,
) (*EventLoop[StateT, VoteT], error) {
// We use a buffered channel to avoid blocking the caller. We can't afford to
// drop messages, since that undermines liveness, but we also want to avoid
// blocking the compliance engine. We assume that we can process proposals
// faster than the compliance engine feeds them; worst case we fill the buffer
// and block the compliance engine worker, but that should happen only if the
// compliance engine receives a large number of states in a short period of
// time (when catching up, for instance).
proposals := make(chan queuedProposal[StateT, VoteT], 1000)
el := &EventLoop[StateT, VoteT]{
tracer: tracer,
eventHandler: eventHandler,
proposals: proposals,
tcSubmittedNotifier: make(chan struct{}, 1),
qcSubmittedNotifier: make(chan struct{}, 1),
partialTimeoutCertificateCreatedNotifier: make(chan struct{}, 1),
newestSubmittedTimeoutCertificate: tracker.NewNewestTCTracker(),
newestSubmittedQc: tracker.NewNewestQCTracker(),
newestSubmittedPartialTimeoutCertificate: tracker.NewNewestPartialTimeoutCertificateTracker(),
startTime: startTime,
}
componentBuilder := lifecycle.NewComponentManagerBuilder()
componentBuilder.AddWorker(func(
ctx lifecycle.SignalerContext,
ready lifecycle.ReadyFunc,
) {
ready()
// launch when scheduled by el.startTime
el.tracer.Trace(fmt.Sprintf("event loop will start at: %v", el.startTime))
select {
case <-ctx.Done():
return
case <-time.After(time.Until(el.startTime)):
el.tracer.Trace("starting event loop")
err := el.loop(ctx)
if err != nil {
el.tracer.Error("irrecoverable event loop error", err)
ctx.Throw(err)
}
}
})
el.ComponentManager = componentBuilder.Build()
return el, nil
}
// loop executes the core HotStuff logic in a single thread. It picks inputs
// from the various inbound channels and executes the EventHandler's respective
// method for processing this input. During normal operations, the EventHandler
// is not expected to return any errors, as all inputs are assumed to be fully
// validated (or produced by trusted components within the node). Therefore,
// any error is a symptom of state corruption, bugs or violation of API
// contracts. In all cases, continuing operations is not an option, i.e. we exit
// the event loop and return an exception.
func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {
err := el.eventHandler.Start(ctx)
if err != nil {
return fmt.Errorf("could not start event handler: %w", err)
}
shutdownSignaled := ctx.Done()
timeoutCertificates := el.tcSubmittedNotifier
quorumCertificates := el.qcSubmittedNotifier
partialTCs := el.partialTimeoutCertificateCreatedNotifier
for {
// Giving timeout events the priority to be processed first.
// This is to prevent attacks from malicious nodes that attempt
// to block honest nodes' pacemaker from progressing by sending
// other events.
timeoutChannel := el.eventHandler.TimeoutChannel()
// the first select makes sure we process timeouts with priority
select {
// if we receive the shutdown signal, exit the loop
case <-shutdownSignaled:
el.tracer.Trace("shutting down event loop")
return nil
// processing a timeout or partial TC event is top priority, since
// they allow the node to contribute to TC aggregation when replicas can't
// make progress on the happy path
case <-timeoutChannel:
el.tracer.Trace("received timeout")
err = el.eventHandler.OnLocalTimeout()
if err != nil {
return fmt.Errorf("could not process timeout: %w", err)
}
// At this point, we have received and processed an event from the timeout
// channel. A timeout also means that we have made progress. A new timeout
// will have been started and el.eventHandler.TimeoutChannel() will be a
// NEW channel (for the just-started timeout). Very important to start the
// for loop from the beginning, to continue with the new timeout
// channel!
continue
case <-partialTCs:
el.tracer.Trace("received partial timeout")
err = el.eventHandler.OnPartialTimeoutCertificateCreated(
el.newestSubmittedPartialTimeoutCertificate.NewestPartialTimeoutCertificate(),
)
if err != nil {
return fmt.Errorf("could not process partial created TC event: %w", err)
}
// At this point, we have received and processed a partial TC event, which
// could have resulted in several scenarios:
// 1. a rank change with potential voting or proposal creation
// 2. a created and broadcast timeout state
// 3. QC and TC didn't result in a rank change and no timeout was created,
// since we have already timed out or the partial TC was created for a rank
// different from the current one.
continue
default:
el.tracer.Trace("non-priority event")
// fall through to non-priority events
}
// select for state headers/QCs here
select {
// same as before
case <-shutdownSignaled:
el.tracer.Trace("shutting down event loop")
return nil
// same as before
case <-timeoutChannel:
el.tracer.Trace("received timeout")
err = el.eventHandler.OnLocalTimeout()
if err != nil {
return fmt.Errorf("could not process timeout: %w", err)
}
// if we have a new proposal, process it
case queuedItem := <-el.proposals:
el.tracer.Trace("received proposal")
proposal := queuedItem.proposal
err = el.eventHandler.OnReceiveProposal(proposal)
if err != nil {
return fmt.Errorf(
"could not process proposal %x: %w",
proposal.State.Identifier,
err,
)
}
el.tracer.Trace(
"state proposal has been processed successfully",
consensus.Uint64Param("rank", proposal.State.Rank),
)
// if we have a new QC, process it
case <-quorumCertificates:
el.tracer.Trace("received quorum certificate")
err = el.eventHandler.OnReceiveQuorumCertificate(
*el.newestSubmittedQc.NewestQC(),
)
if err != nil {
return fmt.Errorf("could not process QC: %w", err)
}
// if we have a new TC, process it
case <-timeoutCertificates:
el.tracer.Trace("received timeout certificate")
err = el.eventHandler.OnReceiveTimeoutCertificate(
*el.newestSubmittedTimeoutCertificate.NewestTC(),
)
if err != nil {
return fmt.Errorf("could not process TC: %w", err)
}
case <-partialTCs:
el.tracer.Trace("received partial timeout certificate")
err = el.eventHandler.OnPartialTimeoutCertificateCreated(
el.newestSubmittedPartialTimeoutCertificate.NewestPartialTimeoutCertificate(),
)
if err != nil {
return fmt.Errorf("could no process partial created TC event: %w", err)
}
}
}
}
// SubmitProposal pushes the received state to the proposals channel
func (el *EventLoop[StateT, VoteT]) SubmitProposal(
proposal *models.SignedProposal[StateT, VoteT],
) {
queueItem := queuedProposal[StateT, VoteT]{
proposal: proposal,
insertionTime: time.Now(),
}
select {
case el.proposals <- queueItem:
case <-el.ComponentManager.ShutdownSignal():
return
}
}
// onTrustedQC pushes the received QC (which MUST be validated) to the
// quorumCertificates channel
func (el *EventLoop[StateT, VoteT]) onTrustedQC(qc *models.QuorumCertificate) {
if el.newestSubmittedQc.Track(qc) {
select {
case el.qcSubmittedNotifier <- struct{}{}:
default:
}
}
}
// onTrustedTC pushes the received TC (which MUST be validated) to the
// timeoutCertificates channel
func (el *EventLoop[StateT, VoteT]) onTrustedTC(tc *models.TimeoutCertificate) {
if el.newestSubmittedTimeoutCertificate.Track(tc) {
select {
case el.tcSubmittedNotifier <- struct{}{}:
default:
}
} else {
qc := (*tc).GetLatestQuorumCert()
if el.newestSubmittedQc.Track(&qc) {
select {
case el.qcSubmittedNotifier <- struct{}{}:
default:
}
}
}
}
// OnTimeoutCertificateConstructedFromTimeouts pushes the received TC to the
// timeoutCertificates channel
func (el *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts(
tc models.TimeoutCertificate,
) {
el.onTrustedTC(&tc)
}
// OnPartialTimeoutCertificateCreated creates a
// consensus.PartialTimeoutCertificateCreated payload and signals its
// availability on the buffered partialTimeoutCertificateCreatedNotifier
// channel for further processing by the EventHandler. The send is
// non-blocking: if a notification is already pending, the event is coalesced
// and the EventHandler picks up the newest payload from the tracker.
func (el *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated(
rank uint64,
newestQC models.QuorumCertificate,
previousRankTimeoutCert models.TimeoutCertificate,
) {
event := &consensus.PartialTimeoutCertificateCreated{
Rank: rank,
NewestQuorumCertificate: newestQC,
PriorRankTimeoutCertificate: previousRankTimeoutCert,
}
if el.newestSubmittedPartialTimeoutCertificate.Track(event) {
select {
case el.partialTimeoutCertificateCreatedNotifier <- struct{}{}:
default:
}
}
}
// OnNewQuorumCertificateDiscovered pushes already validated QCs that were
// submitted from TimeoutAggregator to the event handler
func (el *EventLoop[StateT, VoteT]) OnNewQuorumCertificateDiscovered(
qc models.QuorumCertificate,
) {
el.onTrustedQC(&qc)
}
// OnNewTimeoutCertificateDiscovered pushes already validated TCs that were
// submitted from TimeoutAggregator to the event handler
func (el *EventLoop[StateT, VoteT]) OnNewTimeoutCertificateDiscovered(
tc models.TimeoutCertificate,
) {
el.onTrustedTC(&tc)
}
// OnQuorumCertificateConstructedFromVotes implements
// consensus.VoteCollectorConsumer and pushes received qc into processing
// pipeline.
func (el *EventLoop[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(
qc models.QuorumCertificate,
) {
el.onTrustedQC(&qc)
}
// OnTimeoutProcessed implements consensus.TimeoutCollectorConsumer and is no-op
func (el *EventLoop[StateT, VoteT]) OnTimeoutProcessed(
timeout *models.TimeoutState[VoteT],
) {
}
// OnVoteProcessed implements consensus.VoteCollectorConsumer and is no-op
func (el *EventLoop[StateT, VoteT]) OnVoteProcessed(vote *VoteT) {}
// Type used to satisfy generic arguments in compile-time type assertion check
type nilUnique struct{}
// GetSignature implements models.Unique.
func (n *nilUnique) GetSignature() []byte {
panic("unimplemented")
}
// GetTimestamp implements models.Unique.
func (n *nilUnique) GetTimestamp() uint64 {
panic("unimplemented")
}
// Source implements models.Unique.
func (n *nilUnique) Source() models.Identity {
panic("unimplemented")
}
// Clone implements models.Unique.
func (n *nilUnique) Clone() models.Unique {
panic("unimplemented")
}
// GetRank implements models.Unique.
func (n *nilUnique) GetRank() uint64 {
panic("unimplemented")
}
// Identity implements models.Unique.
func (n *nilUnique) Identity() models.Identity {
panic("unimplemented")
}
var _ models.Unique = (*nilUnique)(nil)
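The two-stage select in `loop` above is a general Go idiom for prioritized channel processing: a first select with a `default` case drains the priority input without blocking, and only then does a second select block across all inputs. A distilled, runnable sketch under purely local names (nothing below comes from the monorepo):

package main

import (
	"context"
	"fmt"
	"time"
)

// run always checks the timeout channel before accepting other work,
// mirroring the priority structure of EventLoop.loop.
func run(ctx context.Context, timeouts <-chan struct{}, proposals <-chan int) {
	for {
		// stage 1: non-blocking priority check
		select {
		case <-ctx.Done():
			return
		case <-timeouts:
			fmt.Println("timeout (priority)")
			continue // restart: timeout handling may swap channels
		default:
		}
		// stage 2: block over all inputs
		select {
		case <-ctx.Done():
			return
		case <-timeouts:
			fmt.Println("timeout")
		case p := <-proposals:
			fmt.Println("proposal", p)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	timeouts := make(chan struct{}, 1)
	proposals := make(chan int, 8)
	proposals <- 1
	timeouts <- struct{}{} // handled first despite arriving later
	proposals <- 2
	run(ctx, timeouts, proposals)
}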

View File

@ -0,0 +1,262 @@
package eventloop
import (
"context"
"sync"
"testing"
"time"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"go.uber.org/atomic"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest"
)
// TestEventLoop performs unit testing of the event loop: it checks that submitted events are
// propagated to the event handler, and that timeouts are handled.
func TestEventLoop(t *testing.T) {
suite.Run(t, new(EventLoopTestSuite))
}
type EventLoopTestSuite struct {
suite.Suite
eh *mocks.EventHandler[*helper.TestState, *helper.TestVote]
cancel context.CancelFunc
eventLoop *EventLoop[*helper.TestState, *helper.TestVote]
}
func (s *EventLoopTestSuite) SetupTest() {
s.eh = mocks.NewEventHandler[*helper.TestState, *helper.TestVote](s.T())
s.eh.On("Start", mock.Anything).Return(nil).Maybe()
s.eh.On("TimeoutChannel").Return(make(<-chan time.Time, 1)).Maybe()
s.eh.On("OnLocalTimeout").Return(nil).Maybe()
eventLoop, err := NewEventLoop(helper.Logger(), s.eh, time.Time{})
require.NoError(s.T(), err)
s.eventLoop = eventLoop
ctx, cancel := context.WithCancel(context.Background())
s.cancel = cancel
signalerCtx := unittest.NewMockSignalerContext(s.T(), ctx)
s.eventLoop.Start(signalerCtx)
unittest.RequireCloseBefore(s.T(), s.eventLoop.Ready(), 100*time.Millisecond, "event loop not started")
}
func (s *EventLoopTestSuite) TearDownTest() {
s.cancel()
unittest.RequireCloseBefore(s.T(), s.eventLoop.Done(), 100*time.Millisecond, "event loop not stopped")
}
// TestReadyDone tests if event loop stops internal worker thread
func (s *EventLoopTestSuite) TestReadyDone() {
time.Sleep(1 * time.Second)
go func() {
s.cancel()
}()
unittest.RequireCloseBefore(s.T(), s.eventLoop.Done(), 100*time.Millisecond, "event loop not stopped")
}
// Test_SubmitProposal tests that a submitted proposal is eventually sent to the event handler for processing
func (s *EventLoopTestSuite) Test_SubmitProposal() {
proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]()
processed := atomic.NewBool(false)
s.eh.On("OnReceiveProposal", proposal).Run(func(args mock.Arguments) {
processed.Store(true)
}).Return(nil).Once()
s.eventLoop.SubmitProposal(proposal)
require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}
// Test_SubmitQC tests that submitted QC is eventually sent to `EventHandler.OnReceiveQuorumCertificate` for processing
func (s *EventLoopTestSuite) Test_SubmitQC() {
// qcIngestionFunction is the archetype for EventLoop.OnQuorumCertificateConstructedFromVotes and EventLoop.OnNewQuorumCertificateDiscovered
type qcIngestionFunction func(models.QuorumCertificate)
testQCIngestionFunction := func(f qcIngestionFunction, qcRank uint64) {
qc := helper.MakeQC(helper.WithQCRank(qcRank))
processed := atomic.NewBool(false)
s.eh.On("OnReceiveQuorumCertificate", qc).Run(func(args mock.Arguments) {
processed.Store(true)
}).Return(nil).Once()
f(qc)
require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}
s.Run("QCs handed to EventLoop.OnQuorumCertificateConstructedFromVotes are forwarded to EventHandler", func() {
testQCIngestionFunction(s.eventLoop.OnQuorumCertificateConstructedFromVotes, 100)
})
s.Run("QCs handed to EventLoop.OnNewQuorumCertificateDiscovered are forwarded to EventHandler", func() {
testQCIngestionFunction(s.eventLoop.OnNewQuorumCertificateDiscovered, 101)
})
}
// Test_SubmitTC tests that submitted TC is eventually sent to `EventHandler.OnReceiveTimeoutCertificate` for processing
func (s *EventLoopTestSuite) Test_SubmitTC() {
// tcIngestionFunction is the archetype for EventLoop.OnTimeoutCertificateConstructedFromTimeouts and EventLoop.OnNewTimeoutCertificateDiscovered
type tcIngestionFunction func(models.TimeoutCertificate)
testTCIngestionFunction := func(f tcIngestionFunction, tcRank uint64) {
tc := helper.MakeTC(helper.WithTCRank(tcRank))
processed := atomic.NewBool(false)
s.eh.On("OnReceiveTimeoutCertificate", tc).Run(func(args mock.Arguments) {
processed.Store(true)
}).Return(nil).Once()
f(tc)
require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}
s.Run("TCs handed to EventLoop.OnTimeoutCertificateConstructedFromTimeouts are forwarded to EventHandler", func() {
testTCIngestionFunction(s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts, 100)
})
s.Run("TCs handed to EventLoop.OnNewTimeoutCertificateDiscovered are forwarded to EventHandler", func() {
testTCIngestionFunction(s.eventLoop.OnNewTimeoutCertificateDiscovered, 101)
})
}
// Test_SubmitTC_IngestNewestQC tests that the QC included in a TC is eventually sent to `EventHandler.OnReceiveQuorumCertificate` for processing
func (s *EventLoopTestSuite) Test_SubmitTC_IngestNewestQC() {
// tcIngestionFunction is the archetype for EventLoop.OnTimeoutCertificateConstructedFromTimeouts and EventLoop.OnNewTimeoutCertificateDiscovered
type tcIngestionFunction func(models.TimeoutCertificate)
testTCIngestionFunction := func(f tcIngestionFunction, tcRank, qcRank uint64) {
tc := helper.MakeTC(helper.WithTCRank(tcRank),
helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(qcRank))))
processed := atomic.NewBool(false)
s.eh.On("OnReceiveQuorumCertificate", tc.GetLatestQuorumCert()).Run(func(args mock.Arguments) {
processed.Store(true)
}).Return(nil).Once()
f(tc)
require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}
// process initial TC, this will track the newest TC
s.eh.On("OnReceiveTimeoutCertificate", mock.Anything).Return(nil).Once()
s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts(helper.MakeTC(
helper.WithTCRank(100),
helper.WithTCNewestQC(
helper.MakeQC(
helper.WithQCRank(80),
),
),
))
s.Run("QCs handed to EventLoop.OnTimeoutCertificateConstructedFromTimeouts are forwarded to EventHandler", func() {
testTCIngestionFunction(s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts, 100, 99)
})
s.Run("QCs handed to EventLoop.OnNewTimeoutCertificateDiscovered are forwarded to EventHandler", func() {
testTCIngestionFunction(s.eventLoop.OnNewTimeoutCertificateDiscovered, 100, 100)
})
}
// Test_OnPartialTimeoutCertificateCreated tests that event loop delivers partialTimeoutCertificateCreated events to event handler.
func (s *EventLoopTestSuite) Test_OnPartialTimeoutCertificateCreated() {
rank := uint64(1000)
newestQC := helper.MakeQC(helper.WithQCRank(rank - 10))
previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(rank-1), helper.WithTCNewestQC(newestQC))
processed := atomic.NewBool(false)
partialTimeoutCertificateCreated := &consensus.PartialTimeoutCertificateCreated{
Rank: rank,
NewestQuorumCertificate: newestQC,
PriorRankTimeoutCertificate: previousRankTimeoutCert,
}
s.eh.On("OnPartialTimeoutCertificateCreated", partialTimeoutCertificateCreated).Run(func(args mock.Arguments) {
processed.Store(true)
}).Return(nil).Once()
s.eventLoop.OnPartialTimeoutCertificateCreated(rank, newestQC, previousRankTimeoutCert)
require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}
// TestEventLoop_Timeout tests that event loop delivers timeout events to event handler under pressure
func TestEventLoop_Timeout(t *testing.T) {
eh := &mocks.EventHandler[*helper.TestState, *helper.TestVote]{}
processed := atomic.NewBool(false)
eh.On("Start", mock.Anything).Return(nil).Once()
eh.On("OnReceiveQuorumCertificate", mock.Anything).Return(nil).Maybe()
eh.On("OnReceiveProposal", mock.Anything).Return(nil).Maybe()
eh.On("OnLocalTimeout").Run(func(args mock.Arguments) {
processed.Store(true)
}).Return(nil).Once()
eventLoop, err := NewEventLoop(helper.Logger(), eh, time.Time{})
require.NoError(t, err)
eh.On("TimeoutChannel").Return(time.After(100 * time.Millisecond))
ctx, cancel := context.WithCancel(context.Background())
signalerCtx := unittest.NewMockSignalerContext(t, ctx)
eventLoop.Start(signalerCtx)
unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not started")
time.Sleep(10 * time.Millisecond)
var wg sync.WaitGroup
wg.Add(2)
// spam with proposals and QCs
go func() {
defer wg.Done()
for !processed.Load() {
qc := helper.MakeQC()
eventLoop.OnQuorumCertificateConstructedFromVotes(qc)
}
}()
go func() {
defer wg.Done()
for !processed.Load() {
eventLoop.SubmitProposal(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]())
}
}()
require.Eventually(t, processed.Load, time.Millisecond*200, time.Millisecond*10)
unittest.AssertReturnsBefore(t, func() { wg.Wait() }, time.Millisecond*200)
cancel()
unittest.RequireCloseBefore(t, eventLoop.Done(), 100*time.Millisecond, "event loop not stopped")
}
// TestReadyDoneWithStartTime tests that event loop correctly starts and schedules start of processing
// when startTime argument is used
func TestReadyDoneWithStartTime(t *testing.T) {
eh := &mocks.EventHandler[*helper.TestState, *helper.TestVote]{}
eh.On("Start", mock.Anything).Return(nil)
eh.On("TimeoutChannel").Return(make(<-chan time.Time, 1))
eh.On("OnLocalTimeout").Return(nil)
startTimeDuration := 2 * time.Second
startTime := time.Now().Add(startTimeDuration)
eventLoop, err := NewEventLoop(helper.Logger(), eh, startTime)
require.NoError(t, err)
done := make(chan struct{})
eh.On("OnReceiveProposal", mock.Anything).Run(func(args mock.Arguments) {
require.True(t, time.Now().After(startTime))
close(done)
}).Return(nil).Once()
ctx, cancel := context.WithCancel(context.Background())
signalerCtx := unittest.NewMockSignalerContext(t, ctx)
eventLoop.Start(signalerCtx)
unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not started")
eventLoop.SubmitProposal(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]())
unittest.RequireCloseBefore(t, done, startTimeDuration+100*time.Millisecond, "proposal wasn't received")
cancel()
unittest.RequireCloseBefore(t, eventLoop.Done(), 100*time.Millisecond, "event loop not stopped")
}
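The notifier pattern these tests exercise (see `onTrustedQC`/`onTrustedTC` above) separates payload from signal: a tracker retains only the newest certificate, while a size-1 buffered channel with a non-blocking send coalesces bursts of notifications. A self-contained sketch of the idiom, with a simplified rank-only tracker standing in for NewestQCTracker:

package main

import (
	"fmt"
	"sync"
)

// newestTracker keeps only the highest rank seen so far.
type newestTracker struct {
	mu   sync.Mutex
	rank uint64
	set  bool
}

// Track returns true iff rank advanced the newest value.
func (t *newestTracker) Track(rank uint64) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.set && rank <= t.rank {
		return false
	}
	t.rank, t.set = rank, true
	return true
}

func (t *newestTracker) Newest() uint64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.rank
}

func main() {
	tracker := &newestTracker{}
	notifier := make(chan struct{}, 1) // size-1 buffer coalesces bursts

	submit := func(rank uint64) {
		if tracker.Track(rank) {
			select {
			case notifier <- struct{}{}: // signal, if none pending
			default: // already signalled; consumer reads tracker anyway
			}
		}
	}

	submit(10)
	submit(12)
	submit(11) // stale, ignored by the tracker
	<-notifier
	fmt.Println("process newest rank:", tracker.Newest()) // 12
}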

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,394 @@
package forest
import (
"fmt"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// LevelledForest contains multiple trees (which is a potentially disconnected
// planar graph). Each vertex in the graph has a level and a hash. A vertex can
// only have one parent, which must have strictly smaller level. A vertex can
// have multiple children, all with strictly larger level.
// A LevelledForest provides the ability to prune all vertices up to a specific
// level. A tree whose root is below the pruning threshold might decompose into
// multiple disconnected subtrees as a result of pruning.
// By design, the LevelledForest does _not_ touch the parent information for
// vertices that are on the lowest retained level. Thereby, it is possible to
// initialize the LevelledForest with a root vertex at the lowest retained
// level, without this root needing to have a parent. Furthermore, the root
// vertex can be at level 0 and in absence of a parent still satisfy the
// condition that any parent must be of lower level (mathematical principle of
// acuous truth) without the implementation needing to worry about unsigned
// integer underflow.
//
// LevelledForest is NOT safe for concurrent use by multiple goroutines.
type LevelledForest struct {
vertices VertexSet
verticesAtLevel map[uint64]VertexList
size uint64
LowestLevel uint64
}
type VertexList []*vertexContainer
type VertexSet map[models.Identity]*vertexContainer
// vertexContainer holds information about a tree vertex. Internally, we
// distinguish between
// - FULL container: has non-nil value for vertex.
// Used for vertices, which have been added to the tree.
// - EMPTY container: has NIL value for vertex.
// Used for vertices, which have NOT been added to the tree, but are
// referenced by vertices in the tree. An empty container is converted to a
// full container when the respective vertex is added to the tree
type vertexContainer struct {
id models.Identity
level uint64
children VertexList
// the following are only set if the state is actually known
vertex Vertex
}
// NewLevelledForest initializes a LevelledForest
func NewLevelledForest(lowestLevel uint64) *LevelledForest {
return &LevelledForest{
vertices: make(VertexSet),
verticesAtLevel: make(map[uint64]VertexList),
LowestLevel: lowestLevel,
}
}
// PruneUpToLevel prunes all vertices UP TO but NOT INCLUDING `level`.
func (f *LevelledForest) PruneUpToLevel(level uint64) error {
if level < f.LowestLevel {
return fmt.Errorf(
"new lowest level %d cannot be smaller than previous last retained level %d",
level,
f.LowestLevel,
)
}
if len(f.vertices) == 0 {
f.LowestLevel = level
return nil
}
elementsPruned := 0
// to optimize pruning of large level-ranges, we compare:
// * the number of levels for which we have stored vertex containers:
// len(f.verticesAtLevel)
// * the number of levels that need to be pruned: level-f.LowestLevel
// We iterate over the dimension which is smaller.
if uint64(len(f.verticesAtLevel)) < level-f.LowestLevel {
for l, vertices := range f.verticesAtLevel {
if l < level {
for _, v := range vertices {
if !f.isEmptyContainer(v) {
elementsPruned++
}
delete(f.vertices, v.id)
}
delete(f.verticesAtLevel, l)
}
}
} else {
for l := f.LowestLevel; l < level; l++ {
verticesAtLevel := f.verticesAtLevel[l]
for _, v := range verticesAtLevel {
if !f.isEmptyContainer(v) {
elementsPruned++
}
delete(f.vertices, v.id)
}
delete(f.verticesAtLevel, l)
}
}
f.LowestLevel = level
f.size -= uint64(elementsPruned)
return nil
}
// HasVertex returns true iff full vertex exists.
func (f *LevelledForest) HasVertex(id models.Identity) bool {
container, exists := f.vertices[id]
return exists && !f.isEmptyContainer(container)
}
// isEmptyContainer returns true iff vertexContainer container is empty, i.e.
// full vertex itself has not been added
func (f *LevelledForest) isEmptyContainer(
vertexContainer *vertexContainer,
) bool {
return vertexContainer.vertex == nil
}
// GetVertex returns (<full vertex>, true) if the vertex with `id` was found,
// and (nil, false) if the full vertex is unknown.
func (f *LevelledForest) GetVertex(id models.Identity) (Vertex, bool) {
container, exists := f.vertices[id]
if !exists || f.isEmptyContainer(container) {
return nil, false
}
return container.vertex, true
}
// GetSize returns the total number of vertices above the lowest pruned level.
// Note: this call is not concurrency-safe; the caller is responsible for
// synchronization.
func (f *LevelledForest) GetSize() uint64 {
return f.size
}
// GetChildren returns a VertexIterator to iterate over the children.
// An empty VertexIterator is returned if no vertices are known whose parent is
// `id`.
func (f *LevelledForest) GetChildren(id models.Identity) VertexIterator {
// if vertex does not exist, container will be nil
if container, ok := f.vertices[id]; ok {
return newVertexIterator(container.children)
}
return newVertexIterator(nil) // VertexIterator gracefully handles nil slices
}
// GetNumberOfChildren returns number of children of given vertex
func (f *LevelledForest) GetNumberOfChildren(id models.Identity) int {
// if vertex does not exist, container is the default zero value for
// vertexContainer, which contains a nil-slice for its children
container := f.vertices[id]
num := 0
for _, child := range container.children {
if child.vertex != nil {
num++
}
}
return num
}
// GetVerticesAtLevel returns a VertexIterator to iterate over the Vertices at
// the specified level. An empty VertexIterator is returned, if no vertices are
// known at the specified level. If `level` is already pruned, an empty
// VertexIterator is returned.
func (f *LevelledForest) GetVerticesAtLevel(level uint64) VertexIterator {
return newVertexIterator(f.verticesAtLevel[level])
}
// GetNumberOfVerticesAtLevel returns the number of full vertices at given
// level. A full vertex is a vertex that was explicitly added to the forest. In
// contrast, an empty vertex container represents a vertex that is _referenced_
// as parent by one or more full vertices, but has not been added itself to the
// forest. We only count vertices that have been explicitly added to the forest
// and not yet pruned. (In comparison, we do _not_ count vertices that are
// _referenced_ as parent by vertices, but have not been added themselves).
func (f *LevelledForest) GetNumberOfVerticesAtLevel(level uint64) int {
num := 0
for _, container := range f.verticesAtLevel[level] {
if !f.isEmptyContainer(container) {
num++
}
}
return num
}
// AddVertex adds the vertex to the forest if it is within the non-pruned
// levels. Handles repeated addition of the same vertex (keeps the first added
// vertex). If the vertex's level is below the pruning level, the method is a
// no-op.
// UNVALIDATED:
// requires that vertex would pass validity check LevelledForest.VerifyVertex(vertex).
func (f *LevelledForest) AddVertex(vertex Vertex) {
if vertex.Level() < f.LowestLevel {
return
}
container := f.getOrCreateVertexContainer(vertex.VertexID(), vertex.Level())
if !f.isEmptyContainer(container) { // the vertex was already stored
return
}
// container is empty, i.e. full vertex is new and should be stored in container
container.vertex = vertex // add vertex to container
f.registerWithParent(container)
f.size += 1
}
// registerWithParent retrieves the parent and registers the given vertex as a
// child. For a vertex whose level is equal to the pruning threshold, we do not
// inspect the parent at all. Thereby, this implementation can gracefully handle
// the corner case where the tree has a defined starting vertex (a distinct
// root). This is commonly the case in a statechain (genesis, or a spork root
// state). Mathematically, this means that this library can also represent
// bounded trees.
func (f *LevelledForest) registerWithParent(vertexContainer *vertexContainer) {
// caution, necessary for handling bounded trees:
// For the root vertex (genesis state) the level is _exactly_ at LowestLevel.
// For
// these states, a parent does not exist. In the implementation, we
// deliberately do not call the `Parent()` method, as its output is
// conceptually undefined. Thereby, we can gracefully handle the corner case
// of
// vertex.level = vertex.Parent().Level = LowestLevel = 0
if vertexContainer.level <= f.LowestLevel { // check (a)
return
}
_, parentRank := vertexContainer.vertex.Parent()
if parentRank < f.LowestLevel {
return
}
parentContainer := f.getOrCreateVertexContainer(
vertexContainer.vertex.Parent(),
)
parentContainer.children = append(parentContainer.children, vertexContainer)
}
// getOrCreateVertexContainer returns the vertexContainer for `id` if one
// exists, or creates a new vertexContainer and adds it to the internal data
// structures. It does not verify that a pre-existing container (empty or full)
// with the same id has the same level; callers are expected to have
// established this via VerifyVertex.
func (f *LevelledForest) getOrCreateVertexContainer(
id models.Identity,
level uint64,
) *vertexContainer {
container, exists := f.vertices[id]
if !exists {
container = &vertexContainer{
id: id,
level: level,
}
f.vertices[container.id] = container
vertices := f.verticesAtLevel[container.level]
f.verticesAtLevel[container.level] = append(vertices, container)
}
return container
}
// VerifyVertex verifies that adding vertex `v` would yield a valid Levelled
// Forest. Specifically, we verify that _all_ of the following conditions are
// satisfied:
//
// 1. `v.Level()` must be strictly larger than the level that `v` reports
// for its parent (maintains an acyclic graph).
//
// 2. If a vertex with the same ID as `v.VertexID()` exists in the graph or is
// referenced by another vertex within the graph, the level must be
// identical. (In other words, we don't have vertices with the same ID but
// different level)
//
// 3. Let `ParentLevel`, `ParentID` denote the level, ID that `v` reports for
// its parent. If a vertex with `ParentID` exists (or is referenced by other
// vertices as their parent), we require that the respective level is
// identical to `ParentLevel`.
//
// Notes:
// - If `v.Level()` has already been pruned, adding it to the forest is a
// NoOp. Hence, any vertex with level below the pruning threshold
// automatically passes.
// - By design, the LevelledForest does _not_ touch the parent information for
// vertices that are on the lowest retained level. Thereby, it is possible
// to initialize the LevelledForest with a root vertex at the lowest
// retained level, without this root needing to have a parent. Furthermore,
// the root vertex can be at level 0 and in absence of a parent still
// satisfy the condition that any parent must be of lower level
// (mathematical principle of vacuous truth) without the implementation
// needing to worry about unsigned integer underflow.
//
// Error returns:
// - InvalidVertexError if the input vertex is invalid for insertion to the
// forest.
func (f *LevelledForest) VerifyVertex(v Vertex) error {
if v.Level() < f.LowestLevel {
return nil
}
storedContainer, haveVertexContainer := f.vertices[v.VertexID()]
if !haveVertexContainer { // have no vertex with same id stored
// the only thing remaining to check is the parent information
return f.ensureConsistentParent(v)
}
// Found a vertex container, i.e. `v` already exists, or it is referenced by
// some other vertex. In all cases, `v.Level()` should match the
// vertexContainer's information
if v.Level() != storedContainer.level {
return NewInvalidVertexErrorf(
v,
"level conflicts with stored vertex with same id (%d!=%d)",
v.Level(),
storedContainer.level,
)
}
// vertex container is empty, i.e. `v` is referenced by some other vertex as
// its parent:
if f.isEmptyContainer(storedContainer) {
// the only thing remaining to check is the parent information
return f.ensureConsistentParent(v)
}
// vertex container holds a vertex with the same ID as `v`:
// The parent information from vertexContainer has already been checked for
// consistency. So we simply compare with the existing vertex for
// inconsistencies
// if the vertex is at the lowest retained level, we can't check the
// parent (it's pruned):
if v.Level() == f.LowestLevel {
return nil
}
newParentId, newParentLevel := v.Parent()
storedParentId, storedParentLevel := storedContainer.vertex.Parent()
if newParentId != storedParentId {
return NewInvalidVertexErrorf(
v,
"parent ID conflicts with stored parent (%x!=%x)",
newParentId,
storedParentId,
)
}
if newParentLevel != storedParentLevel {
return NewInvalidVertexErrorf(
v,
"parent level conflicts with stored parent (%d!=%d)",
newParentLevel,
storedParentLevel,
)
}
// all _relevant_ fields identical
return nil
}
// ensureConsistentParent verifies that vertex.Parent() is consistent with
// current forest.
// Returns InvalidVertexError if:
// * there is a parent with the same ID but different level;
// * the parent's level is _not_ smaller than the vertex's level
func (f *LevelledForest) ensureConsistentParent(vertex Vertex) error {
if vertex.Level() <= f.LowestLevel {
// the vertex is at or below the lowest retained level, so we can't check
// the parent (it's pruned)
return nil
}
// verify parent
parentID, parentLevel := vertex.Parent()
if !(vertex.Level() > parentLevel) {
return NewInvalidVertexErrorf(
vertex,
"vertex parent level (%d) must be smaller than proposed vertex level (%d)",
parentLevel,
vertex.Level(),
)
}
storedParent, haveParentStored := f.GetVertex(parentID)
if !haveParentStored {
return nil
}
if storedParent.Level() != parentLevel {
return NewInvalidVertexErrorf(
vertex,
"parent level conflicts with stored parent (%d!=%d)",
parentLevel,
storedParent.Level(),
)
}
return nil
}
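PruneUpToLevel's branch on `len(f.verticesAtLevel)` versus `level - f.LowestLevel` keeps pruning linear in the smaller of the two dimensions, which matters when a sparsely populated forest is pruned across a huge level range. A simplified, runnable sketch of that choice (string payloads in place of vertex containers; the vertex set and size bookkeeping are omitted):

package main

import "fmt"

// pruneUpTo deletes all levels strictly below `level`, iterating over
// whichever is smaller: the stored levels or the [lowest, level) range.
func pruneUpTo(byLevel map[uint64][]string, lowest, level uint64) uint64 {
	if uint64(len(byLevel)) < level-lowest {
		for l := range byLevel { // few stored levels: scan the map
			if l < level {
				delete(byLevel, l)
			}
		}
	} else {
		for l := lowest; l < level; l++ { // narrow range: scan the levels
			delete(byLevel, l)
		}
	}
	return level // the new lowest retained level
}

func main() {
	byLevel := map[uint64][]string{3: {"a"}, 4: {"b", "c"}, 900: {"d"}}
	lowest := pruneUpTo(byLevel, 3, 800) // 3 map keys beat 797 levels
	fmt.Println(lowest, byLevel)         // 800 map[900:[d]]
}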

103
consensus/forest/vertex.go Normal file
View File

@ -0,0 +1,103 @@
package forest
import (
"errors"
"fmt"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
type Vertex interface {
// VertexID returns the vertex's ID (in most cases its hash)
VertexID() models.Identity
// Level returns the vertex's level
Level() uint64
// Parent returns the parent's (ID, level)
Parent() (models.Identity, uint64)
}
// VertexToString returns a string representation of the vertex.
func VertexToString(v Vertex) string {
parentID, parentLevel := v.Parent()
return fmt.Sprintf(
"<id=%x level=%d parent_id=%s parent_level=%d>",
v.VertexID(),
v.Level(),
parentID,
parentLevel,
)
}
// VertexIterator is a stateful iterator for VertexList.
// Internally operates directly on the Vertex Containers
// It has one-element look ahead for skipping empty vertex containers.
type VertexIterator struct {
data VertexList
idx int
next Vertex
}
func (it *VertexIterator) preLoad() {
for it.idx < len(it.data) {
v := it.data[it.idx].vertex
it.idx++
if v != nil {
it.next = v
return
}
}
it.next = nil
}
// NextVertex returns the next Vertex or nil if there is none
func (it *VertexIterator) NextVertex() Vertex {
res := it.next
it.preLoad()
return res
}
// HasNext returns true if and only if there is a next Vertex
func (it *VertexIterator) HasNext() bool {
return it.next != nil
}
func newVertexIterator(vertexList VertexList) VertexIterator {
it := VertexIterator{
data: vertexList,
}
it.preLoad()
return it
}
// InvalidVertexError indicates that a proposed vertex is invalid for insertion
// to the forest.
type InvalidVertexError struct {
// Vertex is the invalid vertex
Vertex Vertex
// msg provides additional context
msg string
}
func (err InvalidVertexError) Error() string {
return fmt.Sprintf(
"invalid vertex %s: %s",
VertexToString(err.Vertex),
err.msg,
)
}
func IsInvalidVertexError(err error) bool {
var target InvalidVertexError
return errors.As(err, &target)
}
func NewInvalidVertexErrorf(
vertex Vertex,
msg string,
args ...interface{},
) InvalidVertexError {
return InvalidVertexError{
Vertex: vertex,
msg: fmt.Sprintf(msg, args...),
}
}
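VertexIterator's one-element look-ahead is what allows HasNext to answer truthfully while silently skipping empty containers. The same idiom reduced to a self-contained example over `*string` values (local names only, not the package's types):

package main

import "fmt"

// lookAheadIter pre-loads the next non-nil element so HasNext is exact.
type lookAheadIter struct {
	data []*string
	idx  int
	next *string
}

func (it *lookAheadIter) preload() {
	for it.idx < len(it.data) {
		v := it.data[it.idx]
		it.idx++
		if v != nil { // skip "empty containers"
			it.next = v
			return
		}
	}
	it.next = nil
}

// HasNext reports whether Next will return a non-nil element.
func (it *lookAheadIter) HasNext() bool { return it.next != nil }

// Next returns the current element and pre-loads the following one.
func (it *lookAheadIter) Next() *string {
	res := it.next
	it.preload()
	return res
}

func newIter(data []*string) *lookAheadIter {
	it := &lookAheadIter{data: data}
	it.preload()
	return it
}

func main() {
	a, b := "full", "also-full"
	it := newIter([]*string{nil, &a, nil, nil, &b})
	for it.HasNext() {
		fmt.Println(*it.Next()) // full, also-full
	}
}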

657
consensus/forks/forks.go Normal file
View File

@ -0,0 +1,657 @@
package forks
import (
"fmt"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/forest"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Forks enforces structural validity of the consensus state and implements
// finalization rules as defined in Jolteon consensus
// (https://arxiv.org/abs/2106.10362). The same approach was later adopted by
// the Diem team, resulting in DiemBFT v4:
// https://developers.diem.com/papers/diem-consensus-state-machine-replication-in-the-diem-blockchain/2021-08-17.pdf
// Forks is NOT safe for concurrent use by multiple goroutines.
type Forks[StateT models.Unique, VoteT models.Unique] struct {
finalizationCallback consensus.Finalizer
notifier consensus.FollowerConsumer[StateT, VoteT]
forest forest.LevelledForest
trustedRoot *models.CertifiedState[StateT]
// finalityProof holds the latest finalized state including the certified
// child as proof of finality. CAUTION: is nil, when Forks has not yet
// finalized any states beyond the finalized root state it was initialized
// with
finalityProof *consensus.FinalityProof[StateT]
}
var _ consensus.Forks[*nilUnique] = (*Forks[*nilUnique, *nilUnique])(nil)
func NewForks[StateT models.Unique, VoteT models.Unique](
trustedRoot *models.CertifiedState[StateT],
finalizationCallback consensus.Finalizer,
notifier consensus.FollowerConsumer[StateT, VoteT],
) (*Forks[StateT, VoteT], error) {
if trustedRoot == nil {
return nil,
models.NewConfigurationErrorf("invalid root: root is nil")
}
if (trustedRoot.State.Identifier != trustedRoot.CertifyingQuorumCertificate.Identity()) ||
(trustedRoot.State.Rank != trustedRoot.CertifyingQuorumCertificate.GetRank()) {
return nil,
models.NewConfigurationErrorf(
"invalid root: root QC is not pointing to root state",
)
}
forks := Forks[StateT, VoteT]{
finalizationCallback: finalizationCallback,
notifier: notifier,
forest: *forest.NewLevelledForest(trustedRoot.State.Rank),
trustedRoot: trustedRoot,
finalityProof: nil,
}
// verify and add root state to levelled forest
err := forks.EnsureStateIsValidExtension(trustedRoot.State)
if err != nil {
return nil, fmt.Errorf(
"invalid root state %x: %w",
trustedRoot.Identifier(),
err,
)
}
forks.forest.AddVertex(ToStateContainer2[StateT](trustedRoot.State))
return &forks, nil
}
// FinalizedRank returns the largest rank number where a finalized state is
// known
func (f *Forks[StateT, VoteT]) FinalizedRank() uint64 {
if f.finalityProof == nil {
return f.trustedRoot.State.Rank
}
return f.finalityProof.State.Rank
}
// FinalizedState returns the finalized state with the largest rank number
func (f *Forks[StateT, VoteT]) FinalizedState() *models.State[StateT] {
if f.finalityProof == nil {
return f.trustedRoot.State
}
return f.finalityProof.State
}
// FinalityProof returns the latest finalized state and a certified child from
// the subsequent rank, which proves finality.
// CAUTION: method returns (nil, false), when Forks has not yet finalized any
// states beyond the finalized root state it was initialized with.
func (f *Forks[StateT, VoteT]) FinalityProof() (
*consensus.FinalityProof[StateT],
bool,
) {
return f.finalityProof, f.finalityProof != nil
}
// GetState returns (*models.State, true) if the state with the specified
// id was found and (nil, false) otherwise.
func (f *Forks[StateT, VoteT]) GetState(stateID models.Identity) (
*models.State[StateT],
bool,
) {
stateContainer, hasState := f.forest.GetVertex(stateID)
if !hasState {
return nil, false
}
return stateContainer.(*StateContainer[StateT]).GetState(), true
}
// GetStatesForRank returns all known states for the given rank
func (f *Forks[StateT, VoteT]) GetStatesForRank(
rank uint64,
) []*models.State[StateT] {
vertexIterator := f.forest.GetVerticesAtLevel(rank)
// in the vast majority of cases, there will only be one proposal for a
// particular rank
states := make([]*models.State[StateT], 0, 1)
for vertexIterator.HasNext() {
v := vertexIterator.NextVertex()
states = append(states, v.(*StateContainer[StateT]).GetState())
}
return states
}
// IsKnownState checks whether state is known.
func (f *Forks[StateT, VoteT]) IsKnownState(stateID models.Identity) bool {
_, hasState := f.forest.GetVertex(stateID)
return hasState
}
// IsProcessingNeeded determines whether the given state needs processing,
// based on the state's rank and hash.
// Returns false if any of the following conditions applies
// - state rank is _below_ the most recently finalized state
// - the state already exists in the consensus state
//
// UNVALIDATED: expects state to pass Forks.EnsureStateIsValidExtension(state)
func (f *Forks[StateT, VoteT]) IsProcessingNeeded(state *models.State[StateT]) bool {
if state.Rank < f.FinalizedRank() || f.IsKnownState(state.Identifier) {
return false
}
return true
}
// EnsureStateIsValidExtension checks that the given state is a valid extension
// to the tree of states already stored (no state modifications). Specifically,
// the following conditions are enforced, which are critical to the correctness
// of Forks:
//
// 1. If a state with the same ID is already stored, their ranks must be
// identical.
// 2. The state's rank must be strictly larger than the rank of its parent.
// 3. The parent must already be stored (or be below the pruning threshold).
//
// Exclusions to these rules (by design):
// Let W denote the rank of state's parent (i.e. W := state.QC.Rank) and F the
// latest finalized rank.
//
// (i) If state.Rank < F, adding the state would be a no-op. Such states are
// considered compatible (principle of vacuous truth), i.e. we skip
// checking 1, 2, 3.
// (ii) If state.Rank == F, we do not inspect the QC / parent at all (skip 2
// and 3). This exception is important for compatibility with genesis or
// spork-root states, which do not contain a QC.
// (iii) If state.Rank > F, but state.QC.Rank < F the parent has already been
// pruned. In this case, we omit rule 3. (principle of vacuous truth
// applied to the parent)
//
// We assume that all states are fully verified. A valid state must satisfy all
// consistency requirements; otherwise we have a bug in the compliance layer.
//
// Error returns:
// - models.MissingStateError if the parent of the input proposal does not
// exist in the forest (but is above the pruned rank). Represents violation
// of condition 3.
// - models.InvalidStateError if the state violates condition 1. or 2.
// - generic error in case of unexpected bug or internal state corruption
func (f *Forks[StateT, VoteT]) EnsureStateIsValidExtension(
state *models.State[StateT],
) error {
if state.Rank < f.forest.LowestLevel { // exclusion (i)
return nil
}
// LevelledForest enforces conditions 1. and 2. including the respective
// exclusions (ii) and (iii).
stateContainer := ToStateContainer2[StateT](state)
err := f.forest.VerifyVertex(stateContainer)
if err != nil {
if forest.IsInvalidVertexError(err) {
return models.NewInvalidStateErrorf(
state,
"not a valid vertex for state tree: %w",
err,
)
}
return fmt.Errorf(
"state tree generated unexpected error validating vertex: %w",
err,
)
}
// Condition 3:
// LevelledForest implements a more generalized algorithm that also works for
// disjoint graphs. Therefore, LevelledForest does _not_ enforce condition 3.
// Here, we additionally require that the pending states form a tree (connected
// graph), i.e. we need to enforce condition 3:
if (state.Rank == f.forest.LowestLevel) ||
(state.ParentQuorumCertificate.GetRank() < f.forest.LowestLevel) { // exclusion (ii) and (iii)
return nil
}
// For a state whose parent is _not_ below the pruning height, we expect the
// parent to be known.
_, isParentKnown := f.forest.GetVertex(
state.ParentQuorumCertificate.Identity(),
)
if !isParentKnown { // missing parent
return models.MissingStateError{
Rank: state.ParentQuorumCertificate.GetRank(),
Identifier: state.ParentQuorumCertificate.Identity(),
}
}
return nil
}
// AddCertifiedState appends the given certified state to the tree of
// pending states and updates the latest finalized state (if finalization
// progressed). Unless the parent is below the pruning threshold (latest
// finalized rank), we require that the parent is already stored in Forks.
// Calling this method with previously processed states leaves the consensus
// state invariant (though, it will potentially cause some duplicate
// processing).
//
// Possible error returns:
// - models.MissingStateError if the parent does not exist in the forest (but
// is above the pruned rank). From the perspective of Forks, this error is
// benign (no-op).
// - models.InvalidStateError if the state is invalid (see
// `Forks.EnsureStateIsValidExtension` for details). From the perspective of
// Forks, this error is benign (no-op). However, we assume all states are
// fully verified, i.e. they should satisfy all consistency requirements.
// Hence, this error is likely an indicator of a bug in the compliance
// layer.
// - models.ByzantineThresholdExceededError if conflicting QCs or conflicting
// finalized states have been detected (violating a foundational consensus
// guarantees). This indicates that there are 1/3+ Byzantine nodes (weighted
// by seniority) in the network, breaking the safety guarantees of HotStuff
// (or there is a critical bug / data corruption). Forks cannot recover from
// this exception.
// - All other errors are potential symptoms of bugs or state corruption.
func (f *Forks[StateT, VoteT]) AddCertifiedState(
certifiedState *models.CertifiedState[StateT],
) error {
if !f.IsProcessingNeeded(certifiedState.State) {
return nil
}
// Check proposal for byzantine evidence, store it and emit
// `OnStateIncorporated` notification. Note: `checkForByzantineEvidence` only
// inspects the state, but _not_ its certifying QC. Hence, we have to
// additionally check here, whether the certifying QC conflicts with any known
// QCs.
err := f.checkForByzantineEvidence(certifiedState.State)
if err != nil {
return fmt.Errorf(
"cannot check for Byzantine evidence in certified state %x: %w",
certifiedState.State.Identifier,
err,
)
}
err = f.checkForConflictingQCs(&certifiedState.CertifyingQuorumCertificate)
if err != nil {
return fmt.Errorf(
"certifying QC for state %x failed check for conflicts: %w",
certifiedState.State.Identifier,
err,
)
}
f.forest.AddVertex(ToStateContainer2[StateT](certifiedState.State))
f.notifier.OnStateIncorporated(certifiedState.State)
// Update finality status:
err = f.checkForAdvancingFinalization(certifiedState)
if err != nil {
return fmt.Errorf("updating finalization failed: %w", err)
}
return nil
}
// AddValidatedState appends the validated state to the tree of pending
// states and updates the latest finalized state (if applicable). Unless the
// parent is below the pruning threshold (latest finalized rank), we require
// that the parent is already stored in Forks. Calling this method with
// previously processed states leaves the consensus state invariant (though, it
// will potentially cause some duplicate processing).
// Notes:
// - Method `AddCertifiedState(..)` should preferably be used, if a QC
// certifying `state` is already known. This is generally the case for the
// consensus follower. Method `AddValidatedState` is intended for active
// consensus participants, which fully validate states (incl. payload), i.e.
// QCs are processed as part of validated proposals.
//
// Possible error returns:
// - models.MissingStateError if the parent does not exist in the forest (but
// is above the pruned rank). From the perspective of Forks, this error is
// benign (no-op).
// - models.InvalidStateError if the state is invalid (see
// `Forks.EnsureStateIsValidExtension` for details). From the perspective of
// Forks, this error is benign (no-op). However, we assume all states are
// fully verified, i.e. they should satisfy all consistency requirements.
// Hence, this error is likely an indicator of a bug in the compliance
// layer.
// - models.ByzantineThresholdExceededError if conflicting QCs or conflicting
// finalized states have been detected (violating a foundational consensus
// guarantees). This indicates that there are 1/3+ Byzantine nodes (weighted
// by seniority) in the network, breaking the safety guarantees of HotStuff
// (or there is a critical bug / data corruption). Forks cannot recover from
// this exception.
// - All other errors are potential symptoms of bugs or state corruption.
func (f *Forks[StateT, VoteT]) AddValidatedState(
proposal *models.State[StateT],
) error {
if !f.IsProcessingNeeded(proposal) {
return nil
}
// Check proposal for byzantine evidence, store it and emit
// `OnStateIncorporated` notification:
err := f.checkForByzantineEvidence(proposal)
if err != nil {
return fmt.Errorf(
"cannot check Byzantine evidence for state %x: %w",
proposal.Identifier,
err,
)
}
f.forest.AddVertex(ToStateContainer2[StateT](proposal))
f.notifier.OnStateIncorporated(proposal)
// Update finality status: In the implementation, our notion of finality is
// based on certified states.
// The certified parent essentially combines the parent with the QC contained
// in the state to drive finalization.
parent, found := f.GetState(proposal.ParentQuorumCertificate.Identity())
if !found {
// Not finding the parent means it is already pruned; hence this state does
// not change the finalization state.
return nil
}
certifiedParent, err := models.NewCertifiedState[StateT](
parent,
proposal.ParentQuorumCertificate,
)
if err != nil {
return fmt.Errorf(
"mismatching QC with parent (corrupted Forks state):%w",
err,
)
}
err = f.checkForAdvancingFinalization(certifiedParent)
if err != nil {
return fmt.Errorf("updating finalization failed: %w", err)
}
return nil
}
// checkForByzantineEvidence inspects whether the given `state` together with
// the already known information yields evidence of byzantine behaviour.
// Furthermore, the method enforces that `state` is a valid extension of the
// tree of pending states. If the state is a double proposal, we emit an
// `OnStateIncorporated` notification. Though, provided the state is a valid
// extension of the state tree by itself, it passes this method without an
// error.
//
// Possible error returns:
// - models.MissingStateError if the parent does not exist in the forest (but
// is above the pruned rank). From the perspective of Forks, this error is
// benign (no-op).
// - models.InvalidStateError if the state is invalid (see
// `Forks.EnsureStateIsValidExtension` for details). From the perspective of
// Forks, this error is benign (no-op). However, we assume all states are
// fully verified, i.e. they should satisfy all consistency requirements.
// Hence, this error is likely an indicator of a bug in the compliance
// layer.
// - models.ByzantineThresholdExceededError if conflicting QCs have been
// detected. Forks cannot recover from this exception.
// - All other errors are potential symptoms of bugs or state corruption.
func (f *Forks[StateT, VoteT]) checkForByzantineEvidence(
state *models.State[StateT],
) error {
err := f.EnsureStateIsValidExtension(state)
if err != nil {
return fmt.Errorf("consistency check on state failed: %w", err)
}
err = f.checkForConflictingQCs(&state.ParentQuorumCertificate)
if err != nil {
return fmt.Errorf("checking QC for conflicts failed: %w", err)
}
f.checkForDoubleProposal(state)
return nil
}
// checkForConflictingQCs checks if QC conflicts with a stored Quorum
// Certificate. In case a conflicting QC is found, an
// ByzantineThresholdExceededError is returned. Two Quorum Certificates q1 and
// q2 are defined as conflicting iff:
//
// q1.Rank == q2.Rank AND q1.Identifier ≠ q2.Identifier
//
// This means there are two Quorums for conflicting states at the same rank.
// Per 'Observation 1' from the Jolteon paper https://arxiv.org/pdf/2106.10362v1.pdf,
// two conflicting QCs can exist if and only if the Byzantine threshold is
// exceeded.
// Error returns:
// - models.ByzantineThresholdExceededError if conflicting QCs have been
// detected. Forks cannot recover from this exception.
// - All other errors are potential symptoms of bugs or state corruption.
func (f *Forks[StateT, VoteT]) checkForConflictingQCs(
qc *models.QuorumCertificate,
) error {
it := f.forest.GetVerticesAtLevel((*qc).GetRank())
for it.HasNext() {
otherState := it.NextVertex() // by construction, must have same rank as qc.Rank
if (*qc).Identity() != otherState.VertexID() {
// * we have just found another state at the same rank number as qc.Rank
// but with different hash
// * if this state has a child c, this child will have
// c.qc.rank = parentRank
// c.qc.ID != parentIdentifier
// => conflicting qc
otherChildren := f.forest.GetChildren(otherState.VertexID())
if otherChildren.HasNext() {
otherChild := otherChildren.NextVertex().(*StateContainer[StateT]).GetState()
conflictingQC := otherChild.ParentQuorumCertificate
return models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf(
"conflicting QCs at rank %d: %x and %x",
(*qc).GetRank(), (*qc).Identity(), conflictingQC.Identity(),
)}
}
}
}
return nil
}
// checkForDoubleProposal checks if the input proposal is a double proposal.
// A double proposal occurs when two proposals with the same rank exist in
// Forks. If there is a double proposal, notifier.OnDoubleProposeDetected is
// triggered.
func (f *Forks[StateT, VoteT]) checkForDoubleProposal(
state *models.State[StateT],
) {
it := f.forest.GetVerticesAtLevel(state.Rank)
for it.HasNext() {
otherVertex := it.NextVertex() // by construction, must have same rank as state
otherState := otherVertex.(*StateContainer[StateT]).GetState()
if state.Identifier != otherState.Identifier {
f.notifier.OnDoubleProposeDetected(state, otherState)
}
}
}
// checkForAdvancingFinalization checks whether observing certifiedState leads
// to progress of finalization. This function should be called every time a new
// state is added to Forks. If the new state is the head of a 2-chain satisfying
// the finalization rule, we update `Forks.finalityProof` to the new latest
// finalized state. Calling this method with previously-processed states leaves
// the consensus state invariant.
// UNVALIDATED: assumes that relevant state properties are consistent with
// previous states
// Error returns:
// - models.MissingStateError if the parent does not exist in the forest (but
// is above the pruned rank). From the perspective of Forks, this error is
// benign (no-op).
// - models.ByzantineThresholdExceededError in case we detect a finalization
// fork (violating a foundational consensus guarantee). This indicates that
// there are 1/3+ Byzantine nodes (weighted by seniority) in the network,
// breaking the safety guarantees of HotStuff (or there is a critical bug /
// data corruption). Forks cannot recover from this exception.
// - generic error in case of unexpected bug or internal state corruption
func (f *Forks[StateT, VoteT]) checkForAdvancingFinalization(
certifiedState *models.CertifiedState[StateT],
) error {
// We prune all states in forest which are below the most recently finalized
// state. Hence, we have a pruned ancestry if and only if either of the
// following conditions applies:
// (a) If a state's parent rank (i.e. state.QC.Rank) is below the most
// recently finalized state.
// (b) If a state's rank is equal to the most recently finalized state.
// Caution:
// * Under normal operation, case (b) is covered by the logic for case (a)
// * However, the existence of a genesis state requires handling case (b)
// explicitly:
// The root state is specified and trusted by the node operator. If the root
// state is the genesis state, it might not contain a QC pointing to a
// parent (as there is no parent). In this case, condition (a) cannot be
// evaluated.
lastFinalizedRank := f.FinalizedRank()
if (certifiedState.Rank() <= lastFinalizedRank) ||
(certifiedState.State.ParentQuorumCertificate.GetRank() < lastFinalizedRank) {
// Repeated states are expected during normal operations. We enter this code
// path if and only if the parent's rank is _below_ the last finalized
// state. It is straightforward to show:
// Lemma: Let B be a state whose 2-chain reaches beyond the last finalized
// state => B will not update the locked or finalized state
return nil
}
// retrieve parent; always expected to succeed, because we passed the checks
// above
qcForParent := certifiedState.State.ParentQuorumCertificate
parentVertex, parentStateKnown := f.forest.GetVertex(
qcForParent.Identity(),
)
if !parentStateKnown {
return models.MissingStateError{
Rank: qcForParent.GetRank(),
Identifier: qcForParent.Identity(),
}
}
parentState := parentVertex.(*StateContainer[StateT]).GetState()
// Note: we assume that all stored states pass
// Forks.EnsureStateIsValidExtension(state); specifically, that a state's
// Rank is strictly monotonically increasing along each fork, which is
// enforced by LevelledForest.VerifyVertex(...)
// We denote:
// * a DIRECT 1-chain as '<-'
// * a general 1-chain as '<~' (direct or indirect)
// Jolteon's rule for finalizing `parentState` is
// parentState <- State <~ certifyingQC (i.e. a DIRECT 1-chain PLUS
// ╰─────────────────────╯ any 1-chain)
// certifiedState
// Hence, we can finalize `parentState` as head of a 2-chain,
// if and only if `State.Rank` is exactly 1 higher than the rank of
// `parentState`
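// For illustration: with parentState = [◄(1) 2], a certified child
// [◄(2) 3] ◄(3) satisfies parentState.Rank+1 == certifiedState.Rank() and
// finalizes [◄(1) 2], whereas a certified child [◄(2) 4] ◄(4) forms only an
// indirect 1-chain and does not.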
if parentState.Rank+1 != certifiedState.Rank() {
return nil
}
// `parentState` is now finalized:
// * While Forks is single-threaded, there is still the possibility of
// reentrancy. Specifically, the consumers of our finalization events are
// served by the goroutine executing Forks. It is conceivable that a
// consumer might access Forks and query the latest finalization proof.
// This would be legal, if the component supplying the goroutine to Forks
// also consumes the notifications.
// * Therefore, for API safety, we want to first update Fork's
// `finalityProof` before we emit any notifications.
// Advancing finalization step (i): we collect all states for finalization (no
// notifications are emitted)
statesToBeFinalized, err := f.collectStatesForFinalization(&qcForParent)
if err != nil {
return fmt.Errorf(
"advancing finalization to state %x from rank %d failed: %w",
qcForParent.Identity(),
qcForParent.GetRank(),
err,
)
}
// Advancing finalization step (ii): update `finalityProof` and prune
// `LevelledForest`
f.finalityProof = &consensus.FinalityProof[StateT]{
State: parentState,
CertifiedChild: certifiedState,
}
err = f.forest.PruneUpToLevel(f.FinalizedRank())
if err != nil {
return fmt.Errorf("pruning levelled forest failed unexpectedly: %w", err)
}
// Advancing finalization step (iii): iterate over the states from (i) and
// emit finalization events
for _, b := range statesToBeFinalized {
// first notify other critical components about finalized state - all errors
// returned here are fatal exceptions
err = f.finalizationCallback.MakeFinal(b.Identifier)
if err != nil {
return fmt.Errorf("finalization error in other component: %w", err)
}
// notify less important components about finalized state
f.notifier.OnFinalizedState(b)
}
return nil
}
// collectStatesForFinalization collects and returns all newly finalized states
// up to (and including) the state pointed to by `qc`. The states are listed in
// order of increasing rank.
// Error returns:
// - models.ByzantineThresholdExceededError in case we detect a finalization
// fork (violating a foundational consensus guarantee). This indicates that
// there are 1/3+ Byzantine nodes (weighted by seniority) in the network,
// breaking the safety guarantees of HotStuff (or there is a critical bug /
// data corruption). Forks cannot recover from this exception.
// - generic error in case of bug or internal state corruption
func (f *Forks[StateT, VoteT]) collectStatesForFinalization(
qc *models.QuorumCertificate,
) ([]*models.State[StateT], error) {
lastFinalized := f.FinalizedState()
if (*qc).GetRank() < lastFinalized.Rank {
return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf(
"finalizing state with rank %d which is lower than previously finalized state at rank %d",
(*qc).GetRank(), lastFinalized.Rank,
)}
}
if (*qc).GetRank() == lastFinalized.Rank { // no new states to be finalized
return nil, nil
}
// Collect all states that are pending finalization in slice. While we crawl
// the states starting from the newest finalized state backwards (decreasing
// ranks), we would like to return them in order of _increasing_ rank.
// Therefore, we fill the slice starting with the highest index.
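// For illustration: with lastFinalized.Rank = 2 and `qc` certifying rank 5
// via the chain [◄(2) 3] [◄(3) 5], we get l = 3; crawling from rank 5 down
// to rank 3 fills indices 2 and 1, leaving index 0 nil, which is trimmed
// below via statesToBeFinalized[l:].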
l := (*qc).GetRank() - lastFinalized.Rank // l is an upper bound on the number of states that can be finalized
statesToBeFinalized := make([]*models.State[StateT], l)
for (*qc).GetRank() > lastFinalized.Rank {
b, ok := f.GetState((*qc).Identity())
if !ok {
return nil, fmt.Errorf(
"failed to get state (rank=%d, stateID=%x) for finalization",
(*qc).GetRank(),
(*qc).Identity(),
)
}
l--
statesToBeFinalized[l] = b
qc = &b.ParentQuorumCertificate // move to parent
}
// Now, `l` is the index where we stored the oldest state that should be
// finalized. Note that `l` might be larger than zero, if some ranks have no
// finalized states. Hence, `statesToBeFinalized` might start with nil
// entries, which we remove:
statesToBeFinalized = statesToBeFinalized[l:]
// qc should now point to the latest finalized state. Otherwise, the
// consensus committee is compromised (or we have a critical internal bug).
if (*qc).GetRank() < lastFinalized.Rank {
return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf(
"finalizing state with rank %d which is lower than previously finalized state at rank %d",
(*qc).GetRank(), lastFinalized.Rank,
)}
}
if (*qc).GetRank() == lastFinalized.Rank &&
lastFinalized.Identifier != (*qc).Identity() {
return nil, models.ByzantineThresholdExceededError{Evidence: fmt.Sprintf(
"finalizing states with rank %d at conflicting forks: %x and %x",
(*qc).GetRank(), (*qc).Identity(), lastFinalized.Identifier,
)}
}
return statesToBeFinalized, nil
}
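// Usage sketch (mirroring the tests in this package; variable names are
// placeholders, error handling elided):
//
//	forks, err := NewForks(genesisCertifiedState, finalizer, notifier)
//	_ = forks.AddValidatedState(proposal.State) // consensus participant mode
//	_ = forks.AddCertifiedState(certifiedState) // consensus follower mode
//	finalityProof, known := forks.FinalityProof()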


@ -0,0 +1,950 @@
package forks
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
/*****************************************************************************
* NOTATION: *
* A state is denoted as [◄(<qc_rank>) <state_rank>]. *
* For example, [◄(1) 2] means: a state of rank 2 that has a QC for rank 1. *
*****************************************************************************/
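// For example (using the StateBuilder helper defined alongside these tests),
// the chain [◄(1) 2] [◄(2) 3] can be constructed as:
//
//	states, err := NewStateBuilder().Add(1, 2).Add(2, 3).States()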
// TestInitialization verifies that at initialization, Forks reports:
// - the root / genesis state as finalized
// - it has no finalization proof for the root / genesis state (state and its finalization is trusted)
func TestInitialization(t *testing.T) {
forks, _ := newForks(t)
requireOnlyGenesisStateFinalized(t, forks)
_, hasProof := forks.FinalityProof()
require.False(t, hasProof)
}
// TestFinalize_Direct1Chain tests adding a direct 1-chain on top of the genesis state:
// - receives [◄(1) 2] [◄(2) 3]
//
// Expected behaviour:
// - On the one hand, Forks should not finalize any _additional_ states, because there is
// no finalizable 2-chain for [◄(1) 2]. Hence, no finalization events should be emitted.
// - On the other hand, after adding the two states, Forks has enough knowledge to construct
// a FinalityProof for the genesis state.
func TestFinalize_Direct1Chain(t *testing.T) {
builder := NewStateBuilder().
Add(1, 2).
Add(2, 3)
states, err := builder.States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
// adding state [◄(1) 2] should not finalize anything
// as the genesis state is trusted, there should be no FinalityProof available for it
require.NoError(t, forks.AddValidatedState(states[0]))
requireOnlyGenesisStateFinalized(t, forks)
_, hasProof := forks.FinalityProof()
require.False(t, hasProof)
// After adding state [◄(2) 3], Forks has enough knowledge to construct a FinalityProof for the
// genesis state. However, finalization remains at the genesis state, so no events should be emitted.
expectedFinalityProof := makeFinalityProof(t, builder.GenesisState().State, states[0], states[1].ParentQuorumCertificate)
require.NoError(t, forks.AddValidatedState(states[1]))
requireLatestFinalizedState(t, forks, builder.GenesisState().State)
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
// After adding CertifiedState [◄(1) 2] ◄(2), Forks has enough knowledge to construct a FinalityProof for
// the genesis state. However, finalization remains at the genesis state, so no events should be emitted.
expectedFinalityProof := makeFinalityProof(t, builder.GenesisState().State, states[0], states[1].ParentQuorumCertificate)
c, err := models.NewCertifiedState(states[0], states[1].ParentQuorumCertificate)
require.NoError(t, err)
require.NoError(t, forks.AddCertifiedState(c))
requireLatestFinalizedState(t, forks, builder.GenesisState().State)
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestFinalize_Direct2Chain tests adding a direct 1-chain on a direct 1-chain (direct 2-chain).
// - receives [◄(1) 2] [◄(2) 3] [◄(3) 4]
// - Forks should finalize [◄(1) 2]
func TestFinalize_Direct2Chain(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(3, 4).
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestFinalize_DirectIndirect2Chain tests adding an indirect 1-chain on a direct 1-chain.
// receives [◄(1) 2] [◄(2) 3] [◄(3) 5]
// it should finalize [◄(1) 2]
func TestFinalize_DirectIndirect2Chain(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(3, 5).
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestFinalize_IndirectDirect2Chain tests adding a direct 1-chain on an indirect 1-chain.
// - Forks receives [◄(1) 3] [◄(3) 5] [◄(5) 7]
// - it should not finalize any states because there is no finalizable 2-chain.
func TestFinalize_IndirectDirect2Chain(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 3).
Add(3, 5).
Add(5, 7).
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireOnlyGenesisStateFinalized(t, forks)
_, hasProof := forks.FinalityProof()
require.False(t, hasProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireOnlyGenesisStateFinalized(t, forks)
_, hasProof := forks.FinalityProof()
require.False(t, hasProof)
})
}
// TestFinalize_Direct2ChainOnIndirect tests adding a direct 2-chain on an indirect 2-chain:
// - ingesting [◄(1) 3] [◄(3) 5] [◄(5) 6] [◄(6) 7] [◄(7) 8]
// - should result in finalization of [◄(5) 6]
func TestFinalize_Direct2ChainOnIndirect(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 3).
Add(3, 5).
Add(5, 6).
Add(6, 7).
Add(7, 8).
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireLatestFinalizedState(t, forks, states[2])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireLatestFinalizedState(t, forks, states[2])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestFinalize_Direct2ChainOnDirect tests adding a sequence of direct 2-chains:
// - ingesting [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5] [◄(5) 6]
// - should result in finalization of [◄(3) 4]
func TestFinalize_Direct2ChainOnDirect(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(3, 4).
Add(4, 5).
Add(5, 6).
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireLatestFinalizedState(t, forks, states[2])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireLatestFinalizedState(t, forks, states[2])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestFinalize_Multiple2Chains tests the case where a state can be finalized by different 2-chains.
// - ingesting [◄(1) 2] [◄(2) 3] [◄(3) 5] [◄(3) 6] [◄(3) 7]
// - should result in finalization of [◄(1) 2]
func TestFinalize_Multiple2Chains(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(3, 5).
Add(3, 6).
Add(3, 7).
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestFinalize_OrphanedFork tests that we can finalize a state which causes a conflicting fork to be orphaned.
// We ingest the following state tree:
//
// [◄(1) 2] [◄(2) 3]
// [◄(2) 4] [◄(4) 5] [◄(5) 6]
//
// which should result in finalization of [◄(2) 4] and pruning of [◄(2) 3]
func TestFinalize_OrphanedFork(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2). // [◄(1) 2]
Add(2, 3). // [◄(2) 3], should eventually be pruned
Add(2, 4). // [◄(2) 4], should eventually be finalized
Add(4, 5). // [◄(4) 5]
Add(5, 6). // [◄(5) 6]
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
require.False(t, forks.IsKnownState(states[1].Identifier))
requireLatestFinalizedState(t, forks, states[2])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
require.False(t, forks.IsKnownState(states[1].Identifier))
requireLatestFinalizedState(t, forks, states[2])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestDuplication tests that delivering the same state/qc multiple times has
// the same end state as delivering the state/qc once.
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(2) 3] [◄(3) 4] [◄(3) 4] [◄(4) 5] [◄(4) 5]
// - it should finalize [◄(2) 3]
func TestDuplication(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(2, 3).
Add(3, 4).
Add(3, 4).
Add(4, 5).
Add(4, 5).
States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[1], states[3], states[5].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states))
requireLatestFinalizedState(t, forks, states[1])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states))
requireLatestFinalizedState(t, forks, states[1])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestIgnoreStatesBelowFinalizedRank tests that states below finalized rank are ignored.
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(1) 5]
// - it should finalize [◄(1) 2]
func TestIgnoreStatesBelowFinalizedRank(t *testing.T) {
builder := NewStateBuilder().
Add(1, 2). // [◄(1) 2]
Add(2, 3). // [◄(2) 3]
Add(3, 4). // [◄(3) 4]
Add(1, 5) // [◄(1) 5]
states, err := builder.States()
require.NoError(t, err)
expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
// initialize forks and add first 3 states:
// * state [◄(1) 2] should then be finalized
// * and state [1] should be pruned
forks, _ := newForks(t)
require.Nil(t, addValidatedStateToForks(forks, states[:3]))
// sanity checks to confirm correct test setup
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
require.False(t, forks.IsKnownState(builder.GenesisState().Identifier()))
// adding state [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent
// * Forks should store state, despite the parent already being pruned
// * finalization should not change
orphanedState := states[3]
require.Nil(t, forks.AddValidatedState(orphanedState))
require.True(t, forks.IsKnownState(orphanedState.Identifier))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
// initialize forks and add first 3 states:
// * state [◄(1) 2] should then be finalized
// * and state [1] should be pruned
forks, _ := newForks(t)
require.Nil(t, addCertifiedStatesToForks(forks, states[:3]))
// sanity checks to confirm correct test setup
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
require.False(t, forks.IsKnownState(builder.GenesisState().Identifier()))
// adding state [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent
// * Forks should store state, despite the parent already being pruned
// * finalization should not change
certStateWithUnknownParent := toCertifiedState(t, states[3])
require.Nil(t, forks.AddCertifiedState(certStateWithUnknownParent))
require.True(t, forks.IsKnownState(certStateWithUnknownParent.State.Identifier))
requireLatestFinalizedState(t, forks, states[0])
requireFinalityProof(t, forks, expectedFinalityProof)
})
}
// TestDoubleProposal tests that the DoubleProposal notification is emitted when two different
// states for the same rank are added. We ingest the following state tree:
//
// / [◄(1) 2]
// [1]
// \ [◄(1) 2']
//
// which should result in a DoubleProposal event referencing the states [◄(1) 2] and [◄(1) 2']
func TestDoubleProposal(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2). // [◄(1) 2]
AddVersioned(1, 2, 0, 1). // [◄(1) 2']
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, notifier := newForks(t)
notifier.On("OnDoubleProposeDetected", states[1], states[0]).Once()
err = addValidatedStateToForks(forks, states)
require.NoError(t, err)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, notifier := newForks(t)
notifier.On("OnDoubleProposeDetected", states[1], states[0]).Once()
err = forks.AddCertifiedState(toCertifiedState(t, states[0])) // add [◄(1) 2] as certified state
require.NoError(t, err)
err = forks.AddCertifiedState(toCertifiedState(t, states[1])) // add [◄(1) 2'] as certified state
require.NoError(t, err)
})
}
// TestConflictingQCs checks that adding 2 conflicting QCs should return models.ByzantineThresholdExceededError
// We ingest the following state tree:
//
// [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 6]
// [◄(2) 3'] [◄(3') 5]
//
// which should result in a `ByzantineThresholdExceededError`, because conflicting states 3 and 3' both have QCs
func TestConflictingQCs(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2). // [◄(1) 2]
Add(2, 3). // [◄(2) 3]
AddVersioned(2, 3, 0, 1). // [◄(2) 3']
Add(3, 4). // [◄(3) 4]
Add(4, 6). // [◄(4) 6]
AddVersioned(3, 5, 1, 0). // [◄(3') 5]
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, notifier := newForks(t)
notifier.On("OnDoubleProposeDetected", states[2], states[1]).Return(nil)
err = addValidatedStateToForks(forks, states)
assert.True(t, models.IsByzantineThresholdExceededError(err))
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, notifier := newForks(t)
notifier.On("OnDoubleProposeDetected", states[2], states[1]).Return(nil)
// As [◄(3') 5] is not certified, it will not be added to Forks. However, its QC ◄(3') is
// delivered to Forks as part of the *certified* state [◄(2) 3'].
err = addCertifiedStatesToForks(forks, states)
assert.True(t, models.IsByzantineThresholdExceededError(err))
})
}
// TestConflictingFinalizedForks checks that finalizing 2 conflicting forks should return models.ByzantineThresholdExceededError
// We ingest the following state tree:
//
// [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5]
// [◄(2) 6] [◄(6) 7] [◄(7) 8]
//
// Here, both states [◄(2) 3] and [◄(2) 6] satisfy the finalization condition, i.e. we have a fork
// in the finalized states, which should result in a models.ByzantineThresholdExceededError exception.
func TestConflictingFinalizedForks(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(3, 4).
Add(4, 5). // finalizes [◄(2) 3]
Add(2, 6).
Add(6, 7).
Add(7, 8). // finalizes [◄(2) 6], conflicting with [◄(2) 3]
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
err = addValidatedStateToForks(forks, states)
assert.True(t, models.IsByzantineThresholdExceededError(err))
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
err = addCertifiedStatesToForks(forks, states)
assert.True(t, models.IsByzantineThresholdExceededError(err))
})
}
// TestAddDisconnectedState checks that adding a state which does not connect to the
// latest finalized state returns a `models.MissingStateError`
// - receives [◄(2) 3]
// - should return `models.MissingStateError`, because the parent is above the pruning
// threshold, but Forks does not know its parent
func TestAddDisconnectedState(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2). // we will skip this state [◄(1) 2]
Add(2, 3). // [◄(2) 3]
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, _ := newForks(t)
err := forks.AddValidatedState(states[1])
require.Error(t, err)
assert.True(t, models.IsMissingStateError(err))
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, _ := newForks(t)
err := forks.AddCertifiedState(toCertifiedState(t, states[1]))
require.Error(t, err)
assert.True(t, models.IsMissingStateError(err))
})
}
// TestGetState tests that we can retrieve stored states. Here, we test that
// attempting to retrieve nonexistent or pruned states fails without causing an exception.
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4], then [◄(4) 5]
// - should finalize [◄(1) 2], then [◄(2) 3]
func TestGetState(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2). // [◄(1) 2]
Add(2, 3). // [◄(2) 3]
Add(3, 4). // [◄(3) 4]
Add(4, 5). // [◄(4) 5]
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
statesAddedFirst := states[:3] // [◄(1) 2] [◄(2) 3] [◄(3) 4]
remainingState := states[3] // [◄(4) 5]
forks, _ := newForks(t)
// should be unable to retrieve a state before it is added
_, ok := forks.GetState(states[0].Identifier)
assert.False(t, ok)
// add first 3 states - should finalize [◄(1) 2]
err = addValidatedStateToForks(forks, statesAddedFirst)
require.NoError(t, err)
// should be able to retrieve all stored states
for _, state := range statesAddedFirst {
b, ok := forks.GetState(state.Identifier)
assert.True(t, ok)
assert.Equal(t, state, b)
}
// add remaining state [◄(4) 5] - should finalize [◄(2) 3] and prune [◄(1) 2]
require.Nil(t, forks.AddValidatedState(remainingState))
// should be able to retrieve just added state
b, ok := forks.GetState(remainingState.Identifier)
assert.True(t, ok)
assert.Equal(t, remainingState, b)
// should be unable to retrieve pruned state
_, ok = forks.GetState(statesAddedFirst[0].Identifier)
assert.False(t, ok)
})
// Caution: finalization is driven by QCs. Therefore, we include the QC for state 3
// in the first batch of states that we add. This is analogous to the previous sub-test,
// except that we are delivering the QC ◄(3) as part of the certified state of rank 3
// [◄(2) 3] ◄(3)
// while in the previous sub-test, the QC ◄(3) was delivered as part of state [◄(3) 4]
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
statesAddedFirst := toCertifiedStates(t, states[:2]...) // [◄(1) 2] [◄(2) 3] ◄(3)
remainingState := toCertifiedState(t, states[2]) // [◄(3) 4] ◄(4)
forks, _ := newForks(t)
// should be unable to retrieve a state before it is added
_, ok := forks.GetState(states[0].Identifier)
assert.False(t, ok)
// add first states - should finalize [◄(1) 2]
err := forks.AddCertifiedState(statesAddedFirst[0])
require.NoError(t, err)
err = forks.AddCertifiedState(statesAddedFirst[1])
require.NoError(t, err)
// should be able to retrieve all stored states
for _, state := range statesAddedFirst {
b, ok := forks.GetState(state.State.Identifier)
assert.True(t, ok)
assert.Equal(t, state.State, b)
}
// add remaining certified state [◄(3) 4] ◄(4) - should finalize [◄(2) 3] and prune [◄(1) 2]
require.Nil(t, forks.AddCertifiedState(remainingState))
// should be able to retrieve just added state
b, ok := forks.GetState(remainingState.State.Identifier)
assert.True(t, ok)
assert.Equal(t, remainingState.State, b)
// should be unable to retrieve pruned state
_, ok = forks.GetState(statesAddedFirst[0].State.Identifier)
assert.False(t, ok)
})
}
// TestGetStatesForRank tests retrieving states for a rank (also including double proposals).
// - Forks receives [◄(1) 2] [◄(2) 4] [◄(2) 4'],
// where [◄(2) 4'] is a double proposal, because it has the same rank as [◄(2) 4]
//
// Expected behaviour:
// - Forks should store all the states
// - Forks should emit a `OnDoubleProposeDetected` notification
// - we can retrieve all states, including the double proposals
func TestGetStatesForRank(t *testing.T) {
states, err := NewStateBuilder().
Add(1, 2). // [◄(1) 2]
Add(2, 4). // [◄(2) 4]
AddVersioned(2, 4, 0, 1). // [◄(2) 4']
States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, notifier := newForks(t)
notifier.On("OnDoubleProposeDetected", states[2], states[1]).Once()
err = addValidatedStateToForks(forks, states)
require.NoError(t, err)
// expect 1 state at rank 2
storedStates := forks.GetStatesForRank(2)
assert.Len(t, storedStates, 1)
assert.Equal(t, states[0], storedStates[0])
// expect 2 states at rank 4
storedStates = forks.GetStatesForRank(4)
assert.Len(t, storedStates, 2)
assert.ElementsMatch(t, states[1:], storedStates)
// expect 0 states at rank 3
storedStates = forks.GetStatesForRank(3)
assert.Len(t, storedStates, 0)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, notifier := newForks(t)
notifier.On("OnDoubleProposeDetected", states[2], states[1]).Once()
err := forks.AddCertifiedState(toCertifiedState(t, states[0]))
require.NoError(t, err)
err = forks.AddCertifiedState(toCertifiedState(t, states[1]))
require.NoError(t, err)
err = forks.AddCertifiedState(toCertifiedState(t, states[2]))
require.NoError(t, err)
// expect 1 state at rank 2
storedStates := forks.GetStatesForRank(2)
assert.Len(t, storedStates, 1)
assert.Equal(t, states[0], storedStates[0])
// expect 2 states at rank 4
storedStates = forks.GetStatesForRank(4)
assert.Len(t, storedStates, 2)
assert.ElementsMatch(t, states[1:], storedStates)
// expect 0 states at rank 3
storedStates = forks.GetStatesForRank(3)
assert.Len(t, storedStates, 0)
})
}
// TestNotifications tests that Forks emits the expected events:
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4]
//
// Expected Behaviour:
// - Each of the ingested states should result in an `OnStateIncorporated` notification
// - Forks should finalize [◄(1) 2], resulting in a `MakeFinal` event and an `OnFinalizedState` event
func TestNotifications(t *testing.T) {
builder := NewStateBuilder().
Add(1, 2).
Add(2, 3).
Add(3, 4)
states, err := builder.States()
require.NoError(t, err)
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
// 4 states including the genesis are incorporated
notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Times(4)
notifier.On("OnFinalizedState", states[0]).Once()
finalizationCallback := mocks.NewFinalizer(t)
finalizationCallback.On("MakeFinal", states[0].Identifier).Return(nil).Once()
forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
require.NoError(t, err)
require.NoError(t, addValidatedStateToForks(forks, states))
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
// 4 states including the genesis are incorporated
notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Times(4)
notifier.On("OnFinalizedState", states[0]).Once()
finalizationCallback := mocks.NewFinalizer(t)
finalizationCallback.On("MakeFinal", states[0].Identifier).Return(nil).Once()
forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
require.NoError(t, err)
require.NoError(t, addCertifiedStatesToForks(forks, states))
})
}
// TestFinalizingMultipleStates tests that `OnFinalizedState` notifications are emitted in correct order
// when there are multiple states finalized by adding a _single_ state.
// - receiving [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] [◄(11) 12] should not finalize any states,
// because there is no 2-chain with the first chain link being a _direct_ 1-chain
// - adding [◄(12) 22] should finalize up to state [◄(7) 11]
//
// This test verifies the following expected properties:
// 1. Safety under reentrancy:
// While Forks is single-threaded, there is still the possibility of reentrancy. Specifically, the
// consumers of our finalization events are served by the goroutine executing Forks. It is conceivable
// that a consumer might access Forks and query the latest finalization proof. This would be legal, if
// the component supplying the goroutine to Forks also consumes the notifications. Therefore, for API
// safety, we require forks to _first update_ its `FinalityProof()` before it emits _any_ events.
// 2. For each finalized state, `finalizationCallback` event is executed _before_ `OnFinalizedState` notifications.
// 3. States are finalized in order of increasing rank (without skipping any states).
func TestFinalizingMultipleStates(t *testing.T) {
builder := NewStateBuilder().
Add(1, 3). // index 0: [◄(1) 3]
Add(3, 5). // index 1: [◄(3) 5]
Add(5, 7). // index 2: [◄(5) 7]
Add(7, 11). // index 3: [◄(7) 11] -- expected to be finalized
Add(11, 12). // index 4: [◄(11) 12]
Add(12, 22) // index 5: [◄(12) 22]
states, err := builder.States()
require.NoError(t, err)
// The Finality Proof should immediately point to the _latest_ finalized state. Subsequently emitting
// finalization events for lower states is fine, because notifications are guaranteed to
// _eventually_ arrive, i.e. consumers expect notifications / events to potentially lag behind.
expectedFinalityProof := makeFinalityProof(t, states[3], states[4], states[5].ParentQuorumCertificate)
setupForksAndAssertions := func() (*Forks[*helper.TestState, *helper.TestVote], *mocks.Finalizer, *mocks.Consumer[*helper.TestState, *helper.TestVote]) {
// initialize Forks with custom event consumers so we can check order of emitted events
notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
finalizationCallback := mocks.NewFinalizer(t)
notifier.On("OnStateIncorporated", mock.Anything).Return(nil)
forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
require.NoError(t, err)
// expecting finalization of [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] in this order
statesAwaitingFinalization := toStateAwaitingFinalization(states[:4])
finalizationCallback.On("MakeFinal", mock.Anything).Run(func(args mock.Arguments) {
requireFinalityProof(t, forks, expectedFinalityProof) // Requirement 1: forks should _first update_ its `FinalityProof()` before it emits _any_ events
// Requirement 3: finalized in order of increasing rank (without skipping any states).
expectedNextFinalizationEvents := statesAwaitingFinalization[0]
require.Equal(t, expectedNextFinalizationEvents.State.Identifier, args[0])
// Requirement 2: for each finalized state, the `finalizationCallback` event is executed _before_ `OnFinalizedState` notifications.
// no duplication of events under normal operations expected
require.False(t, expectedNextFinalizationEvents.MakeFinalCalled)
require.False(t, expectedNextFinalizationEvents.OnFinalizedStateEmitted)
expectedNextFinalizationEvents.MakeFinalCalled = true
}).Return(nil).Times(4)
notifier.On("OnFinalizedState", mock.Anything).Run(func(args mock.Arguments) {
requireFinalityProof(t, forks, expectedFinalityProof) // Requirement 1: forks should _first update_ its `FinalityProof()` before it emits _any_ events
// Requirement 3: finalized in order of increasing rank (without skipping any states).
expectedNextFinalizationEvents := statesAwaitingFinalization[0]
require.Equal(t, expectedNextFinalizationEvents.State, args[0])
// Requirement 2: for each finalized state, the `finalizationCallback` event is executed _before_ `OnFinalizedState` notifications.
// no duplication of events under normal operations expected
require.True(t, expectedNextFinalizationEvents.MakeFinalCalled)
require.False(t, expectedNextFinalizationEvents.OnFinalizedStateEmitted)
expectedNextFinalizationEvents.OnFinalizedStateEmitted = true
// At this point, `MakeFinal` and `OnFinalizedState` have both been emitted for the state, so we are done with it
statesAwaitingFinalization = statesAwaitingFinalization[1:]
}).Times(4)
return forks, finalizationCallback, notifier
}
t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
forks, finalizationCallback, notifier := setupForksAndAssertions()
err = addValidatedStateToForks(forks, states[:5]) // adding [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] [◄(11) 12]
require.NoError(t, err)
requireOnlyGenesisStateFinalized(t, forks) // finalization should still be at the genesis state
require.NoError(t, forks.AddValidatedState(states[5])) // adding [◄(12) 22] should trigger finalization events
requireFinalityProof(t, forks, expectedFinalityProof)
finalizationCallback.AssertExpectations(t)
notifier.AssertExpectations(t)
})
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
forks, finalizationCallback, notifier := setupForksAndAssertions()
// adding [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] ◄(11)
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[0])))
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[1])))
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[2])))
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[3])))
require.NoError(t, err)
requireOnlyGenesisStateFinalized(t, forks) // finalization should still be at the genesis state
// adding certified state [◄(11) 12] ◄(12) should trigger finalization events
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[4])))
requireFinalityProof(t, forks, expectedFinalityProof)
finalizationCallback.AssertExpectations(t)
notifier.AssertExpectations(t)
})
}
/* ************************************* internal functions ************************************* */
func newForks(t *testing.T) (*Forks[*helper.TestState, *helper.TestVote], *mocks.Consumer[*helper.TestState, *helper.TestVote]) {
notifier := mocks.NewConsumer[*helper.TestState, *helper.TestVote](t)
notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Maybe()
notifier.On("OnFinalizedState", mock.Anything).Maybe()
finalizationCallback := mocks.NewFinalizer(t)
finalizationCallback.On("MakeFinal", mock.Anything).Return(nil).Maybe()
genesisBQ := makeGenesis()
forks, err := NewForks(genesisBQ, finalizationCallback, notifier)
require.NoError(t, err)
return forks, notifier
}
// addValidatedStateToForks adds all the given states to Forks, in order.
// If any errors occur, returns the first one.
func addValidatedStateToForks(forks *Forks[*helper.TestState, *helper.TestVote], states []*models.State[*helper.TestState]) error {
for _, state := range states {
err := forks.AddValidatedState(state)
if err != nil {
return fmt.Errorf("test failed to add state for rank %d: %w", state.Rank, err)
}
}
return nil
}
// addCertifiedStatesToForks iterates over all states, caches them locally in a map,
// constructs certified states whenever possible and adds the certified states to forks.
// Note: if states is a single fork, the _last state_ in the slice will not be added,
// because there is no qc for it.
// If any errors occur, returns the first one.
func addCertifiedStatesToForks(forks *Forks[*helper.TestState, *helper.TestVote], states []*models.State[*helper.TestState]) error {
uncertifiedStates := make(map[models.Identity]*models.State[*helper.TestState])
for _, b := range states {
uncertifiedStates[b.Identifier] = b
parentID := b.ParentQuorumCertificate.Identity()
parent, found := uncertifiedStates[parentID]
if !found {
continue
}
delete(uncertifiedStates, parentID)
certParent, err := models.NewCertifiedState(parent, b.ParentQuorumCertificate)
if err != nil {
return fmt.Errorf("test failed to creat certified state for rank %d: %w", certParent.State.Rank, err)
}
err = forks.AddCertifiedState(certParent)
if err != nil {
return fmt.Errorf("test failed to add certified state for rank %d: %w", certParent.State.Rank, err)
}
}
return nil
}
// requireLatestFinalizedState asserts that the latest finalized state matches `expectedFinalized` and that `FinalizedRank` reports its rank.
func requireLatestFinalizedState(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote], expectedFinalized *models.State[*helper.TestState]) {
require.Equal(t, expectedFinalized, forks.FinalizedState(), "finalized state is not as expected")
require.Equal(t, forks.FinalizedRank(), expectedFinalized.Rank, "FinalizedRank returned wrong value")
}
// requireOnlyGenesisStateFinalized asserts that no states have been finalized beyond the genesis state.
// Caution: does not inspect output of `forks.FinalityProof()`
func requireOnlyGenesisStateFinalized(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote]) {
genesis := makeGenesis()
require.Equal(t, forks.FinalizedState(), genesis.State, "finalized state is not the genesis state")
require.Equal(t, forks.FinalizedState().Rank, genesis.State.Rank)
require.Equal(t, forks.FinalizedState().Rank, genesis.CertifyingQuorumCertificate.GetRank())
require.Equal(t, forks.FinalizedRank(), genesis.State.Rank, "finalized state has wrong qc")
finalityProof, isKnown := forks.FinalityProof()
require.Nil(t, finalityProof, "expecting finality proof to be nil for genesis state at initialization")
require.False(t, isKnown, "no finality proof should be known for genesis state at initialization")
}
// requireFinalityProof asserts that `forks.FinalityProof()` returns the expected proof, and that the finalized state and rank are consistent with it.
func requireFinalityProof(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote], expectedFinalityProof *consensus.FinalityProof[*helper.TestState]) {
finalityProof, isKnown := forks.FinalityProof()
require.True(t, isKnown)
require.Equal(t, expectedFinalityProof, finalityProof)
require.Equal(t, forks.FinalizedState(), expectedFinalityProof.State)
require.Equal(t, forks.FinalizedRank(), expectedFinalityProof.State.Rank)
}
// toCertifiedState generates a QC for the given state and returns their combination as a certified state
func toCertifiedState(t *testing.T, state *models.State[*helper.TestState]) *models.CertifiedState[*helper.TestState] {
qc := &helper.TestQuorumCertificate{
Rank: state.Rank,
Selector: state.Identifier,
}
cb, err := models.NewCertifiedState(state, qc)
require.NoError(t, err)
return cb
}
// toCertifiedStates generates a QC for each given state and returns the combinations as certified states
func toCertifiedStates(t *testing.T, states ...*models.State[*helper.TestState]) []*models.CertifiedState[*helper.TestState] {
certStates := make([]*models.CertifiedState[*helper.TestState], 0, len(states))
for _, b := range states {
certStates = append(certStates, toCertifiedState(t, b))
}
return certStates
}
func makeFinalityProof(t *testing.T, state *models.State[*helper.TestState], directChild *models.State[*helper.TestState], qcCertifyingChild models.QuorumCertificate) *consensus.FinalityProof[*helper.TestState] {
c, err := models.NewCertifiedState(directChild, qcCertifyingChild) // certified child of FinalizedState
require.NoError(t, err)
return &consensus.FinalityProof[*helper.TestState]{State: state, CertifiedChild: c}
}
// stateAwaitingFinalization is intended for tracking finalization events and their order for a specific state
type stateAwaitingFinalization struct {
State *models.State[*helper.TestState]
MakeFinalCalled bool // indicates whether `Finalizer.MakeFinal` was called
OnFinalizedStateEmitted bool // indicates whether `OnFinalizedState` notification was emitted
}
// toStateAwaitingFinalization creates a `stateAwaitingFinalization` tracker for each input state
func toStateAwaitingFinalization(states []*models.State[*helper.TestState]) []*stateAwaitingFinalization {
trackers := make([]*stateAwaitingFinalization, 0, len(states))
for _, b := range states {
tracker := &stateAwaitingFinalization{b, false, false}
trackers = append(trackers, tracker)
}
return trackers
}


@ -0,0 +1,165 @@
package forks
import (
"fmt"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// StateRank specifies the data to create a state
type StateRank struct {
// Rank is the rank of the state to be created
Rank uint64
// StateVersion is the version of the state for that rank.
// Useful for creating conflicting states at the same rank.
StateVersion int
// QCRank is the rank of the QC embedded in this state (also: the rank of the state's parent)
QCRank uint64
// QCVersion is the version of the QC for that rank.
QCVersion int
}
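// For example, StateRank{Rank: 4, StateVersion: 1, QCRank: 3} describes the
// state denoted [◄(3) 4'] in these tests.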
// QCIndex returns a unique identifier for the state's QC.
func (bv *StateRank) QCIndex() string {
return fmt.Sprintf("%v-%v", bv.QCRank, bv.QCVersion)
}
// StateIndex returns a unique identifier for the state.
func (bv *StateRank) StateIndex() string {
return fmt.Sprintf("%v-%v", bv.Rank, bv.StateVersion)
}
// StateBuilder is a test utility for creating state structure fixtures.
type StateBuilder struct {
stateRanks []*StateRank
}
func NewStateBuilder() *StateBuilder {
return &StateBuilder{
stateRanks: make([]*StateRank, 0),
}
}
// Add adds a state with the given qcRank and stateRank. Returns self-reference for chaining.
func (bb *StateBuilder) Add(qcRank uint64, stateRank uint64) *StateBuilder {
bb.stateRanks = append(bb.stateRanks, &StateRank{
Rank: stateRank,
QCRank: qcRank,
})
return bb
}
// GenesisState returns the genesis state, which is always finalized.
func (bb *StateBuilder) GenesisState() *models.CertifiedState[*helper.TestState] {
return makeGenesis()
}
// AddVersioned adds a state with the given qcRank and stateRank.
// In addition, the version identifier of the QC embedded within the state
// is specified by `qcVersion`. The version identifier for the state itself
// (primarily for emulating different state ID) is specified by `stateVersion`.
// [◄(3) 4] denotes a state of rank 4, with a qc for rank 3
// [◄(3) 4'] denotes a state of rank 4 that is different than [◄(3) 4], with a qc for rank 3
// [◄(3) 4'] can be created by AddVersioned(3, 4, 0, 1)
// [◄(3') 4] can be created by AddVersioned(3, 4, 1, 0)
// Returns self-reference for chaining.
func (bb *StateBuilder) AddVersioned(qcRank uint64, stateRank uint64, qcVersion int, stateVersion int) *StateBuilder {
bb.stateRanks = append(bb.stateRanks, &StateRank{
Rank: stateRank,
QCRank: qcRank,
StateVersion: stateVersion,
QCVersion: qcVersion,
})
return bb
}
// Proposals returns a list of all proposals added to the StateBuilder.
// Returns an error if the states do not form a connected tree rooted at genesis.
func (bb *StateBuilder) Proposals() ([]*models.Proposal[*helper.TestState], error) {
states := make([]*models.Proposal[*helper.TestState], 0, len(bb.stateRanks))
genesisState := makeGenesis()
genesisBV := &StateRank{
Rank: genesisState.State.Rank,
QCRank: genesisState.CertifyingQuorumCertificate.GetRank(),
}
qcs := make(map[string]models.QuorumCertificate)
qcs[genesisBV.QCIndex()] = genesisState.CertifyingQuorumCertificate
for _, bv := range bb.stateRanks {
qc, ok := qcs[bv.QCIndex()]
if !ok {
return nil, fmt.Errorf("test fail: no qc found for qc index: %v", bv.QCIndex())
}
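// If the state's rank is not the direct successor of its QC's rank, the
// proposal skips ranks; a valid proposal must then carry a timeout
// certificate for the previous rank, which we emulate here.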
var previousRankTimeoutCert models.TimeoutCertificate
if qc.GetRank()+1 != bv.Rank {
previousRankTimeoutCert = helper.MakeTC(helper.WithTCRank(bv.Rank - 1))
}
proposal := &models.Proposal[*helper.TestState]{
State: &models.State[*helper.TestState]{
Rank: bv.Rank,
ParentQuorumCertificate: qc,
},
PreviousRankTimeoutCertificate: previousRankTimeoutCert,
}
proposal.State.Identifier = makeIdentifier(proposal.State, bv.StateVersion)
states = append(states, proposal)
// generate QC for the new proposal
qcs[bv.StateIndex()] = &helper.TestQuorumCertificate{
Rank: proposal.State.Rank,
Selector: proposal.State.Identifier,
AggregatedSignature: nil,
}
}
return states, nil
}
// States returns a list of all states added to the StateBuilder.
// Returns an error if the states do not form a connected tree rooted at genesis.
func (bb *StateBuilder) States() ([]*models.State[*helper.TestState], error) {
proposals, err := bb.Proposals()
if err != nil {
return nil, fmt.Errorf("StateBuilder failed to generate proposals: %w", err)
}
return toStates(proposals), nil
}
// makeIdentifier creates a state identifier based on the state's rank, QC, and state version.
// This is used to identify states uniquely, in this specific test setup.
// ATTENTION: this should not be confused with the state ID used in production code which is a collision-resistant hash
// of the full state content.
func makeIdentifier(state *models.State[*helper.TestState], stateVersion int) models.Identity {
return fmt.Sprintf("%d-%s-%d", state.Rank, state.Identifier, stateVersion)
}
// constructs the genesis state (identical for all calls)
func makeGenesis() *models.CertifiedState[*helper.TestState] {
genesis := &models.State[*helper.TestState]{
Rank: 1,
}
genesis.Identifier = makeIdentifier(genesis, 0)
genesisQC := &helper.TestQuorumCertificate{
Rank: 1,
Selector: genesis.Identifier,
}
certifiedGenesisState, err := models.NewCertifiedState(genesis, genesisQC)
if err != nil {
panic(fmt.Sprintf("combining genesis state and genesis QC to certified state failed: %s", err.Error()))
}
return certifiedGenesisState
}
// toStates converts the given proposals to a slice of states
func toStates(proposals []*models.Proposal[*helper.TestState]) []*models.State[*helper.TestState] {
states := make([]*models.State[*helper.TestState], 0, len(proposals))
for _, b := range proposals {
states = append(states, b.State)
}
return states
}


@ -0,0 +1,77 @@
package forks
import (
"source.quilibrium.com/quilibrium/monorepo/consensus/forest"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// StateContainer wraps a state to implement forest.Vertex
// so that states can be stored in forest.LevelledForest
type StateContainer[StateT models.Unique] models.State[StateT]
var _ forest.Vertex = (*StateContainer[*nilUnique])(nil)
func ToStateContainer2[StateT models.Unique](
state *models.State[StateT],
) *StateContainer[StateT] {
return (*StateContainer[StateT])(state)
}
func (b *StateContainer[StateT]) GetState() *models.State[StateT] {
return (*models.State[StateT])(b)
}
// Functions implementing forest.Vertex
func (b *StateContainer[StateT]) VertexID() models.Identity {
return b.Identifier
}
func (b *StateContainer[StateT]) Level() uint64 {
return b.Rank
}
func (b *StateContainer[StateT]) Parent() (models.Identity, uint64) {
// Caution: not all states have a QC for the parent, such as the spork root
// states. Per API contract, we are obliged to return a value to prevent
// panics during logging (see the `forest.VertexToString` method).
if b.ParentQuorumCertificate == nil {
return "", 0
}
return b.ParentQuorumCertificate.Identity(),
b.ParentQuorumCertificate.GetRank()
}
// nilUnique is a placeholder type used to satisfy generic arguments in the compile-time type assertion checks in this file
type nilUnique struct{}
// GetSignature implements models.Unique.
func (n *nilUnique) GetSignature() []byte {
panic("unimplemented")
}
// GetTimestamp implements models.Unique.
func (n *nilUnique) GetTimestamp() uint64 {
panic("unimplemented")
}
// Source implements models.Unique.
func (n *nilUnique) Source() models.Identity {
panic("unimplemented")
}
// Clone implements models.Unique.
func (n *nilUnique) Clone() models.Unique {
panic("unimplemented")
}
// GetRank implements models.Unique.
func (n *nilUnique) GetRank() uint64 {
panic("unimplemented")
}
// Identity implements models.Unique.
func (n *nilUnique) Identity() models.Identity {
panic("unimplemented")
}
var _ models.Unique = (*nilUnique)(nil)


@ -1,16 +1,8 @@
 module source.quilibrium.com/quilibrium/monorepo/consensus
-go 1.23.0
+go 1.24.0
-toolchain go1.23.4
+toolchain go1.24.9
-replace source.quilibrium.com/quilibrium/monorepo/protobufs => ../protobufs
-replace source.quilibrium.com/quilibrium/monorepo/types => ../types
-replace source.quilibrium.com/quilibrium/monorepo/config => ../config
-replace source.quilibrium.com/quilibrium/monorepo/utils => ../utils
 replace github.com/multiformats/go-multiaddr => ../go-multiaddr
@ -20,13 +12,31 @@ replace github.com/libp2p/go-libp2p => ../go-libp2p
 replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht
-replace source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub => ../go-libp2p-blossomsub
+replace source.quilibrium.com/quilibrium/monorepo/lifecycle => ../lifecycle
-require go.uber.org/zap v1.27.0
 require (
-	github.com/stretchr/testify v1.10.0 // indirect
-	go.uber.org/multierr v1.11.0 // indirect
+	github.com/gammazero/workerpool v1.1.3
+	github.com/rs/zerolog v1.34.0
 )
-require github.com/pkg/errors v0.9.1
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/gammazero/deque v0.2.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	go.uber.org/goleak v1.3.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+require (
+	github.com/pkg/errors v0.9.1
+	github.com/stretchr/testify v1.11.1
+	go.uber.org/atomic v1.11.0
+	golang.org/x/sync v0.17.0
+	golang.org/x/sys v0.33.0 // indirect
+	source.quilibrium.com/quilibrium/monorepo/lifecycle v0.0.0-00010101000000-000000000000
+)


@ -1,16 +1,51 @@
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA=
github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU=
github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@ -0,0 +1,122 @@
package helper
import (
"bytes"
crand "crypto/rand"
"fmt"
"math/rand"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
type TestAggregatedSignature struct {
Signature []byte
PublicKey []byte
Bitmask []byte
}
func (t *TestAggregatedSignature) GetSignature() []byte {
return t.Signature
}
func (t *TestAggregatedSignature) GetPubKey() []byte {
return t.PublicKey
}
func (t *TestAggregatedSignature) GetBitmask() []byte {
return t.Bitmask
}
type TestQuorumCertificate struct {
Filter []byte
Rank uint64
FrameNumber uint64
Selector models.Identity
Timestamp uint64
AggregatedSignature models.AggregatedSignature
}
func (t *TestQuorumCertificate) GetFilter() []byte {
return t.Filter
}
func (t *TestQuorumCertificate) GetRank() uint64 {
return t.Rank
}
func (t *TestQuorumCertificate) GetFrameNumber() uint64 {
return t.FrameNumber
}
func (t *TestQuorumCertificate) Identity() models.Identity {
return t.Selector
}
func (t *TestQuorumCertificate) GetTimestamp() uint64 {
return t.Timestamp
}
func (t *TestQuorumCertificate) GetAggregatedSignature() models.AggregatedSignature {
return t.AggregatedSignature
}
func (t *TestQuorumCertificate) Equals(other models.QuorumCertificate) bool {
return bytes.Equal(t.Filter, other.GetFilter()) &&
t.Rank == other.GetRank() &&
t.FrameNumber == other.GetFrameNumber() &&
t.Selector == other.Identity() &&
t.Timestamp == other.GetTimestamp() &&
bytes.Equal(
t.AggregatedSignature.GetBitmask(),
other.GetAggregatedSignature().GetBitmask(),
) &&
bytes.Equal(
t.AggregatedSignature.GetPubKey(),
other.GetAggregatedSignature().GetPubKey(),
) &&
bytes.Equal(
t.AggregatedSignature.GetSignature(),
other.GetAggregatedSignature().GetSignature(),
)
}
func MakeQC(options ...func(*TestQuorumCertificate)) models.QuorumCertificate {
s := make([]byte, 32)
crand.Read(s)
qc := &TestQuorumCertificate{
Rank: rand.Uint64(),
FrameNumber: rand.Uint64() + 1,
Selector: string(s),
Timestamp: uint64(time.Now().UnixMilli()),
AggregatedSignature: &TestAggregatedSignature{
PublicKey: make([]byte, 585),
Signature: make([]byte, 74),
Bitmask: []byte{0x01},
},
}
for _, option := range options {
option(qc)
}
return qc
}
func WithQCState[StateT models.Unique](state *models.State[StateT]) func(*TestQuorumCertificate) {
return func(qc *TestQuorumCertificate) {
qc.Rank = state.Rank
qc.Selector = state.Identifier
}
}
func WithQCSigners(signerIndices []byte) func(*TestQuorumCertificate) {
return func(qc *TestQuorumCertificate) {
qc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices
}
}
func WithQCRank(rank uint64) func(*TestQuorumCertificate) {
return func(qc *TestQuorumCertificate) {
qc.Rank = rank
qc.Selector = fmt.Sprintf("%d", rank)
}
}
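
As a usage sketch (hypothetical test snippet; it assumes only the helpers above plus Go's example-test convention, and is not part of this change):

package helper_test

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
)

// Builds a QC at a fixed rank with an explicit signer bitmask.
func ExampleMakeQC() {
	qc := helper.MakeQC(
		helper.WithQCRank(7),
		helper.WithQCSigners([]byte{0x03}), // bitmask marking signers 0 and 1
	)
	fmt.Println(qc.GetRank())
	// Output: 7
}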

consensus/helper/state.go

@ -0,0 +1,467 @@
package helper
import (
crand "crypto/rand"
"fmt"
"math/rand"
"slices"
"strings"
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
type TestWeightedIdentity struct {
ID string
}
// Identity implements models.WeightedIdentity.
func (t *TestWeightedIdentity) Identity() models.Identity {
return t.ID
}
// PublicKey implements models.WeightedIdentity.
func (t *TestWeightedIdentity) PublicKey() []byte {
return make([]byte, 585)
}
// Weight implements models.WeightedIdentity.
func (t *TestWeightedIdentity) Weight() uint64 {
return 1000
}
var _ models.WeightedIdentity = (*TestWeightedIdentity)(nil)
type TestState struct {
Rank uint64
Signature []byte
Timestamp uint64
ID models.Identity
Prover models.Identity
}
// Clone implements models.Unique.
func (t *TestState) Clone() models.Unique {
return &TestState{
Rank: t.Rank,
Signature: slices.Clone(t.Signature),
Timestamp: t.Timestamp,
ID: t.ID,
Prover: t.Prover,
}
}
// GetRank implements models.Unique.
func (t *TestState) GetRank() uint64 {
return t.Rank
}
// GetSignature implements models.Unique.
func (t *TestState) GetSignature() []byte {
return t.Signature
}
// GetTimestamp implements models.Unique.
func (t *TestState) GetTimestamp() uint64 {
return t.Timestamp
}
// Identity implements models.Unique.
func (t *TestState) Identity() models.Identity {
return t.ID
}
// Source implements models.Unique.
func (t *TestState) Source() models.Identity {
return t.Prover
}
type TestVote struct {
Rank uint64
Signature []byte
Timestamp uint64
ID models.Identity
StateID models.Identity
}
// Clone implements models.Unique.
func (t *TestVote) Clone() models.Unique {
return &TestVote{
Rank: t.Rank,
Signature: slices.Clone(t.Signature),
Timestamp: t.Timestamp,
ID: t.ID,
StateID: t.StateID,
}
}
// GetRank implements models.Unique.
func (t *TestVote) GetRank() uint64 {
return t.Rank
}
// GetSignature implements models.Unique.
func (t *TestVote) GetSignature() []byte {
return t.Signature
}
// GetTimestamp implements models.Unique.
func (t *TestVote) GetTimestamp() uint64 {
return t.Timestamp
}
// Identity implements models.Unique.
func (t *TestVote) Identity() models.Identity {
return t.ID
}
// Source implements models.Unique.
func (t *TestVote) Source() models.Identity {
return t.StateID
}
type TestPeer struct {
PeerID string
}
// Clone implements models.Unique.
func (t *TestPeer) Clone() models.Unique {
return &TestPeer{
PeerID: t.PeerID,
}
}
// GetRank implements models.Unique.
func (t *TestPeer) GetRank() uint64 {
return 0
}
// GetSignature implements models.Unique.
func (t *TestPeer) GetSignature() []byte {
return []byte{}
}
// GetTimestamp implements models.Unique.
func (t *TestPeer) GetTimestamp() uint64 {
return 0
}
// Identity implements models.Unique.
func (t *TestPeer) Identity() models.Identity {
return t.PeerID
}
// Source implements models.Unique.
func (t *TestPeer) Source() models.Identity {
return t.PeerID
}
type TestCollected struct {
Rank uint64
TXs [][]byte
}
// Clone implements models.Unique.
func (t *TestCollected) Clone() models.Unique {
return &TestCollected{
Rank: t.Rank,
TXs: slices.Clone(t.TXs),
}
}
// GetRank implements models.Unique.
func (t *TestCollected) GetRank() uint64 {
return t.Rank
}
// GetSignature implements models.Unique.
func (t *TestCollected) GetSignature() []byte {
return []byte{}
}
// GetTimestamp implements models.Unique.
func (t *TestCollected) GetTimestamp() uint64 {
return 0
}
// Identity implements models.Unique.
func (t *TestCollected) Identity() models.Identity {
return fmt.Sprintf("%d", t.Rank)
}
// Source implements models.Unique.
func (t *TestCollected) Source() models.Identity {
return ""
}
var _ models.Unique = (*TestState)(nil)
var _ models.Unique = (*TestVote)(nil)
var _ models.Unique = (*TestPeer)(nil)
var _ models.Unique = (*TestCollected)(nil)
func MakeIdentity() models.Identity {
s := make([]byte, 32)
crand.Read(s)
return models.Identity(s)
}
func MakeState[StateT models.Unique](options ...func(*models.State[StateT])) *models.State[StateT] {
rank := rand.Uint64()
state := models.State[StateT]{
Rank: rank,
Identifier: MakeIdentity(),
ProposerID: MakeIdentity(),
Timestamp: uint64(time.Now().UnixMilli()),
ParentQuorumCertificate: MakeQC(WithQCRank(rank - 1)),
}
for _, option := range options {
option(&state)
}
return &state
}
func WithStateRank[StateT models.Unique](rank uint64) func(*models.State[StateT]) {
return func(state *models.State[StateT]) {
state.Rank = rank
}
}
func WithStateProposer[StateT models.Unique](proposerID models.Identity) func(*models.State[StateT]) {
return func(state *models.State[StateT]) {
state.ProposerID = proposerID
}
}
func WithParentState[StateT models.Unique](parent *models.State[StateT]) func(*models.State[StateT]) {
return func(state *models.State[StateT]) {
state.ParentQuorumCertificate.(*TestQuorumCertificate).Selector = parent.Identifier
state.ParentQuorumCertificate.(*TestQuorumCertificate).Rank = parent.Rank
}
}
func WithParentSigners[StateT models.Unique](signerIndices []byte) func(*models.State[StateT]) {
return func(state *models.State[StateT]) {
state.ParentQuorumCertificate.(*TestQuorumCertificate).AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices
}
}
func WithStateQC[StateT models.Unique](qc models.QuorumCertificate) func(*models.State[StateT]) {
return func(state *models.State[StateT]) {
state.ParentQuorumCertificate = qc
}
}
func MakeVote[VoteT models.Unique]() *VoteT {
return new(VoteT)
}
func MakeSignedProposal[StateT models.Unique, VoteT models.Unique](options ...func(*models.SignedProposal[StateT, VoteT])) *models.SignedProposal[StateT, VoteT] {
proposal := &models.SignedProposal[StateT, VoteT]{
Proposal: *MakeProposal[StateT](),
Vote: MakeVote[VoteT](),
}
for _, option := range options {
option(proposal)
}
return proposal
}
func MakeProposal[StateT models.Unique](options ...func(*models.Proposal[StateT])) *models.Proposal[StateT] {
proposal := &models.Proposal[StateT]{
State: MakeState[StateT](),
PreviousRankTimeoutCertificate: nil,
}
for _, option := range options {
option(proposal)
}
return proposal
}
func WithProposal[StateT models.Unique, VoteT models.Unique](proposal *models.Proposal[StateT]) func(*models.SignedProposal[StateT, VoteT]) {
return func(signedProposal *models.SignedProposal[StateT, VoteT]) {
signedProposal.Proposal = *proposal
}
}
func WithState[StateT models.Unique](state *models.State[StateT]) func(*models.Proposal[StateT]) {
return func(proposal *models.Proposal[StateT]) {
proposal.State = state
}
}
func WithVote[StateT models.Unique, VoteT models.Unique](vote *VoteT) func(*models.SignedProposal[StateT, VoteT]) {
return func(proposal *models.SignedProposal[StateT, VoteT]) {
proposal.Vote = vote
}
}
func WithPreviousRankTimeoutCertificate[StateT models.Unique](previousRankTimeoutCert models.TimeoutCertificate) func(*models.Proposal[StateT]) {
return func(proposal *models.Proposal[StateT]) {
proposal.PreviousRankTimeoutCertificate = previousRankTimeoutCert
}
}
func WithWeightedIdentityList(count int) []models.WeightedIdentity {
wi := []models.WeightedIdentity{}
for range count {
wi = append(wi, &TestWeightedIdentity{
ID: MakeIdentity(),
})
}
return wi
}
func VoteForStateFixture(state *models.State[*TestState], ops ...func(vote **TestVote)) *TestVote {
v := &TestVote{
Rank: state.Rank,
ID: MakeIdentity(),
StateID: state.Identifier,
Signature: make([]byte, 74),
}
for _, op := range ops {
op(&v)
}
return v
}
func VoteFixture(op func(vote **TestVote)) *TestVote {
v := &TestVote{
Rank: rand.Uint64(),
ID: MakeIdentity(),
StateID: MakeIdentity(),
Signature: make([]byte, 74),
}
op(&v)
return v
}
type FmtLog struct {
params []consensus.LogParam
}
// Error implements consensus.TraceLogger.
func (n *FmtLog) Error(message string, err error, params ...consensus.LogParam) {
b := strings.Builder{}
b.WriteString(fmt.Sprintf("ERROR: %s: %v\n", message, err))
for _, param := range n.params {
b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
for _, param := range params {
b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
fmt.Println(b.String())
}
// Trace implements consensus.TraceLogger.
func (n *FmtLog) Trace(message string, params ...consensus.LogParam) {
b := strings.Builder{}
b.WriteString(fmt.Sprintf("TRACE: %s\n", message))
b.WriteString(fmt.Sprintf("\t[%s]\n", time.Now().String()))
for _, param := range n.params {
b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
for _, param := range params {
b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
fmt.Println(b.String())
}
func (n *FmtLog) With(params ...consensus.LogParam) consensus.TraceLogger {
return &FmtLog{
params: slices.Concat(n.params, params),
}
}
func stringFromValue(param consensus.LogParam) string {
switch param.GetKind() {
case "string":
return param.GetValue().(string)
case "time":
return param.GetValue().(time.Time).String()
default:
return fmt.Sprintf("%v", param.GetValue())
}
}
func Logger() *FmtLog {
return &FmtLog{}
}
type BufferLog struct {
params []consensus.LogParam
b *strings.Builder
}
// Error implements consensus.TraceLogger.
func (n *BufferLog) Error(message string, err error, params ...consensus.LogParam) {
n.b.WriteString(fmt.Sprintf("ERROR: %s: %v\n", message, err))
for _, param := range n.params {
n.b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
for _, param := range params {
n.b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
}
// Trace implements consensus.TraceLogger.
func (n *BufferLog) Trace(message string, params ...consensus.LogParam) {
n.b.WriteString(fmt.Sprintf("TRACE: %s\n", message))
n.b.WriteString(fmt.Sprintf("\t[%s]\n", time.Now().String()))
for _, param := range n.params {
n.b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
for _, param := range params {
n.b.WriteString(fmt.Sprintf(
"\t%s: %s\n",
param.GetKey(),
stringFromValue(param),
))
}
}
func (n *BufferLog) Flush() {
fmt.Println(n.b.String())
}
func (n *BufferLog) With(params ...consensus.LogParam) consensus.TraceLogger {
return &BufferLog{
params: slices.Concat(n.params, params),
b: n.b,
}
}
func BufferLogger() *BufferLog {
return &BufferLog{
b: &strings.Builder{},
}
}
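
A minimal usage sketch (hypothetical; consensus.StringParam is the LogParam constructor used by the integration tests later in this change):

package helper_test

import (
	"source.quilibrium.com/quilibrium/monorepo/consensus"
	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
)

func ExampleBufferLogger() {
	log := helper.BufferLogger()
	// With returns a child logger that shares the same buffer and repeats
	// the given params on every subsequent line.
	scoped := log.With(consensus.StringParam("node", "a"))
	scoped.Trace("started")
	log.Flush() // prints all buffered lines at once
}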


@ -0,0 +1,171 @@
package helper
import (
"bytes"
crand "crypto/rand"
"math/rand"
"slices"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
type TestTimeoutCertificate struct {
Filter []byte
Rank uint64
LatestRanks []uint64
LatestQuorumCert models.QuorumCertificate
AggregatedSignature models.AggregatedSignature
}
func (t *TestTimeoutCertificate) GetFilter() []byte {
return t.Filter
}
func (t *TestTimeoutCertificate) GetRank() uint64 {
return t.Rank
}
func (t *TestTimeoutCertificate) GetLatestRanks() []uint64 {
return t.LatestRanks
}
func (t *TestTimeoutCertificate) GetLatestQuorumCert() models.QuorumCertificate {
return t.LatestQuorumCert
}
func (t *TestTimeoutCertificate) GetAggregatedSignature() models.AggregatedSignature {
return t.AggregatedSignature
}
func (t *TestTimeoutCertificate) Equals(other models.TimeoutCertificate) bool {
return bytes.Equal(t.Filter, other.GetFilter()) &&
t.Rank == other.GetRank() &&
slices.Equal(t.LatestRanks, other.GetLatestRanks()) &&
t.LatestQuorumCert.Equals(other.GetLatestQuorumCert()) &&
bytes.Equal(
t.AggregatedSignature.GetBitmask(),
other.GetAggregatedSignature().GetBitmask(),
) &&
bytes.Equal(
t.AggregatedSignature.GetPubKey(),
other.GetAggregatedSignature().GetPubKey(),
) &&
bytes.Equal(
t.AggregatedSignature.GetSignature(),
other.GetAggregatedSignature().GetSignature(),
)
}
func MakeTC(options ...func(*TestTimeoutCertificate)) models.TimeoutCertificate {
tcRank := rand.Uint64()
s := make([]byte, 32)
crand.Read(s)
qc := MakeQC(WithQCRank(tcRank - 1))
highQCRanks := make([]uint64, 3)
for i := range highQCRanks {
highQCRanks[i] = qc.GetRank()
}
tc := &TestTimeoutCertificate{
Rank: tcRank,
LatestQuorumCert: qc,
LatestRanks: highQCRanks,
AggregatedSignature: &TestAggregatedSignature{
Signature: make([]byte, 74),
PublicKey: make([]byte, 585),
Bitmask: []byte{0x01},
},
}
for _, option := range options {
option(tc)
}
return tc
}
func WithTCNewestQC(qc models.QuorumCertificate) func(*TestTimeoutCertificate) {
return func(tc *TestTimeoutCertificate) {
tc.LatestQuorumCert = qc
tc.LatestRanks = []uint64{qc.GetRank()}
}
}
func WithTCSigners(signerIndices []byte) func(*TestTimeoutCertificate) {
return func(tc *TestTimeoutCertificate) {
tc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices
}
}
func WithTCRank(rank uint64) func(*TestTimeoutCertificate) {
return func(tc *TestTimeoutCertificate) {
tc.Rank = rank
}
}
func WithTCHighQCRanks(highQCRanks []uint64) func(*TestTimeoutCertificate) {
return func(tc *TestTimeoutCertificate) {
tc.LatestRanks = highQCRanks
}
}
func TimeoutStateFixture[VoteT models.Unique](
opts ...func(TimeoutState *models.TimeoutState[VoteT]),
) *models.TimeoutState[VoteT] {
timeoutRank := uint64(rand.Uint32())
newestQC := MakeQC(WithQCRank(timeoutRank - 10))
timeout := &models.TimeoutState[VoteT]{
Rank: timeoutRank,
LatestQuorumCertificate: newestQC,
PriorRankTimeoutCertificate: MakeTC(
WithTCRank(timeoutRank-1),
WithTCNewestQC(MakeQC(WithQCRank(newestQC.GetRank()))),
),
}
for _, opt := range opts {
opt(timeout)
}
if timeout.Vote == nil {
panic("WithTimeoutVote must be called")
}
return timeout
}
func WithTimeoutVote[VoteT models.Unique](
vote VoteT,
) func(*models.TimeoutState[VoteT]) {
return func(state *models.TimeoutState[VoteT]) {
state.Vote = &vote
}
}
func WithTimeoutNewestQC[VoteT models.Unique](
newestQC models.QuorumCertificate,
) func(*models.TimeoutState[VoteT]) {
return func(timeout *models.TimeoutState[VoteT]) {
timeout.LatestQuorumCertificate = newestQC
}
}
func WithTimeoutPreviousRankTimeoutCertificate[VoteT models.Unique](
previousRankTimeoutCert models.TimeoutCertificate,
) func(*models.TimeoutState[VoteT]) {
return func(timeout *models.TimeoutState[VoteT]) {
timeout.PriorRankTimeoutCertificate = previousRankTimeoutCert
}
}
func WithTimeoutStateRank[VoteT models.Unique](
rank uint64,
) func(*models.TimeoutState[VoteT]) {
return func(timeout *models.TimeoutState[VoteT]) {
timeout.Rank = rank
if timeout.LatestQuorumCertificate != nil {
timeout.LatestQuorumCertificate.(*TestQuorumCertificate).Rank = rank
}
if timeout.PriorRankTimeoutCertificate != nil {
timeout.PriorRankTimeoutCertificate.(*TestTimeoutCertificate).Rank = rank - 1
}
}
}
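
A minimal sketch of how these fixtures compose (hypothetical snippet; note the fixture panics unless WithTimeoutVote is supplied):

package helper_test

import (
	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
)

func ExampleTimeoutStateFixture() {
	vote := helper.VoteFixture(func(v **helper.TestVote) { (*v).Rank = 9 })
	_ = helper.TimeoutStateFixture(
		helper.WithTimeoutVote(vote), // required, or the fixture panics
		helper.WithTimeoutStateRank[*helper.TestVote](9), // pins the timeout at rank 9, prior-rank TC at rank 8
	)
}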


@ -0,0 +1,40 @@
package integration
import (
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
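// FinalizedStates walks from the latest finalized state back toward the root
// via parent quorum certificates, so the returned slice is ordered
// newest-first.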
func FinalizedStates(in *Instance) []*models.State[*helper.TestState] {
finalized := make([]*models.State[*helper.TestState], 0)
lastFinalID := in.forks.FinalizedState().Identifier
in.updatingStates.RLock()
finalizedState, found := in.headers[lastFinalID]
defer in.updatingStates.RUnlock()
if !found {
return finalized
}
for {
finalized = append(finalized, finalizedState)
if finalizedState.ParentQuorumCertificate == nil {
break
}
finalizedState, found =
in.headers[finalizedState.ParentQuorumCertificate.Identity()]
if !found {
break
}
}
return finalized
}
func FinalizedRanks(in *Instance) []uint64 {
finalizedStates := FinalizedStates(in)
ranks := make([]uint64, 0, len(finalizedStates))
for _, b := range finalizedStates {
ranks = append(ranks, b.Rank)
}
return ranks
}


@ -0,0 +1,19 @@
package integration
type Condition func(*Instance) bool
func RightAway(*Instance) bool {
return true
}
func RankFinalized(rank uint64) Condition {
return func(in *Instance) bool {
return in.forks.FinalizedRank() >= rank
}
}
func RankReached(rank uint64) Condition {
return func(in *Instance) bool {
return in.pacemaker.CurrentRank() >= rank
}
}
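
Conditions compose naturally; a hypothetical combinator (not part of this change) could stop on whichever condition fires first:

// Or returns a Condition that holds as soon as either input condition holds.
func Or(a, b Condition) Condition {
	return func(in *Instance) bool {
		return a(in) || b(in)
	}
}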


@ -0,0 +1,114 @@
package integration
import (
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
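// Connect wires the mocked communicators of the given instances into an
// in-memory network: each instance's own proposals, votes, and timeouts are
// looped back and fanned out to the others, subject to both sides' drop
// filters.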
func Connect(t *testing.T, instances []*Instance) {
// first, create a map of all instances and a queue for each
lookup := make(map[models.Identity]*Instance)
for _, in := range instances {
lookup[in.localID] = in
}
// then, for each instance, initialize a wired up communicator
for _, sender := range instances {
sender := sender // avoid capturing loop variable in closure
*sender.notifier = *NewMockedCommunicatorConsumer()
sender.notifier.CommunicatorConsumer.On("OnOwnProposal", mock.Anything, mock.Anything).Run(
func(args mock.Arguments) {
proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote])
require.True(t, ok)
// sender should always have the parent
sender.updatingStates.RLock()
_, exists := sender.headers[proposal.State.ParentQuorumCertificate.Identity()]
sender.updatingStates.RUnlock()
if !exists {
t.Fatalf("parent for proposal not found (sender: %x, parent: %x)", sender.localID, proposal.State.ParentQuorumCertificate.Identity())
}
// store locally and loop back to engine for processing
sender.ProcessState(proposal)
// check if we should drop the outgoing proposal
if sender.dropPropOut(proposal) {
return
}
// iterate through potential receivers
for _, receiver := range instances {
// we should skip ourselves always
if receiver.localID == sender.localID {
continue
}
// check if we should drop the incoming proposal
if receiver.dropPropIn(proposal) {
continue
}
receiver.ProcessState(proposal)
}
},
)
sender.notifier.CommunicatorConsumer.On("OnOwnVote", mock.Anything, mock.Anything).Run(
func(args mock.Arguments) {
vote, ok := args[0].(**helper.TestVote)
require.True(t, ok)
recipientID, ok := args[1].(models.Identity)
require.True(t, ok)
// get the receiver
receiver, exists := lookup[recipientID]
if !exists {
t.Fatalf("recipient doesn't exist (sender: %x, receiver: %x)", sender.localID, recipientID)
}
// if we are next leader we should be receiving our own vote
if recipientID != sender.localID {
// check if we should drop the outgoing vote
if sender.dropVoteOut(*vote) {
return
}
// check if we should drop the incoming vote
if receiver.dropVoteIn(*vote) {
return
}
}
// submit the vote to the receiving event loop (non-dropping)
receiver.queue <- *vote
},
)
sender.notifier.CommunicatorConsumer.On("OnOwnTimeout", mock.Anything).Run(
func(args mock.Arguments) {
timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote])
require.True(t, ok)
// iterate through potential receivers
for _, receiver := range instances {
// we should skip ourselves always
if receiver.localID == sender.localID {
continue
}
// check if we should drop the outgoing value
if sender.dropTimeoutStateOut(timeoutState) {
continue
}
// check if we should drop the incoming value
if receiver.dropTimeoutStateIn(timeoutState) {
continue
}
receiver.queue <- timeoutState
}
})
}
}


@ -0,0 +1,27 @@
package integration
import (
"time"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
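// DefaultRoot builds a rank-0 genesis state; tests pass the same root to all
// instances so they share a common ancestor.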
func DefaultRoot() *models.State[*helper.TestState] {
ts := uint64(time.Now().UnixMilli())
id := helper.MakeIdentity()
s := &helper.TestState{
Rank: 0,
Signature: make([]byte, 0),
Timestamp: ts,
ID: id,
Prover: "",
}
header := &models.State[*helper.TestState]{
Rank: 0,
State: &s,
Identifier: id,
Timestamp: ts,
}
return header
}


@ -0,0 +1,76 @@
package integration
import (
"math/rand"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteFilter is a filter function for dropping Votes.
// Return value `true` implies that the given Vote should be
// dropped, while `false` indicates that the Vote should be received.
type VoteFilter func(*helper.TestVote) bool
func DropNoVotes(*helper.TestVote) bool {
return false
}
func DropAllVotes(*helper.TestVote) bool {
return true
}
// DropVoteRandomly drops votes randomly with a probability of `dropProbability` ∈ [0,1]
func DropVoteRandomly(dropProbability float64) VoteFilter {
return func(*helper.TestVote) bool {
return rand.Float64() < dropProbability
}
}
func DropVotesBy(voterID models.Identity) VoteFilter {
return func(vote *helper.TestVote) bool {
return vote.ID == voterID
}
}
// ProposalFilter is a filter function for dropping Proposals.
// Return value `true` implies that the given SignedProposal should be
// dropped, while `false` indicates that the SignedProposal should be received.
type ProposalFilter func(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool
func DropNoProposals(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool {
return false
}
func DropAllProposals(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool {
return true
}
// DropProposalRandomly drops proposals randomly with a probability of `dropProbability` ∈ [0,1]
func DropProposalRandomly(dropProbability float64) ProposalFilter {
return func(*models.SignedProposal[*helper.TestState, *helper.TestVote]) bool {
return rand.Float64() < dropProbability
}
}
// DropProposalsBy drops all proposals originating from the specified `proposerID`
func DropProposalsBy(proposerID models.Identity) ProposalFilter {
return func(proposal *models.SignedProposal[*helper.TestState, *helper.TestVote]) bool {
return proposal.State.ProposerID == proposerID
}
}
// TimeoutStateFilter is a filter function for dropping TimeoutStates.
// Return value `true` implies that the given TimeoutState should be
// dropped, while `false` indicates that the TimeoutState should be received.
type TimeoutStateFilter func(*models.TimeoutState[*helper.TestVote]) bool
// DropAllTimeoutStates always returns `true`, i.e. drops all TimeoutStates
func DropAllTimeoutStates(*models.TimeoutState[*helper.TestVote]) bool {
return true
}
// DropNoTimeoutStates always returns `false`, i.e. it lets all TimeoutStates pass.
func DropNoTimeoutStates(*models.TimeoutState[*helper.TestVote]) bool {
return false
}
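
A custom stateful filter is just a closure over the same signature; for example, this hypothetical helper (not part of this change) drops only the first n votes from a given voter:

// DropFirstNVotesBy drops the first n votes cast by voterID, then lets the
// rest through. Not safe for concurrent use; intended for single-threaded
// test wiring.
func DropFirstNVotesBy(n int, voterID models.Identity) VoteFilter {
	dropped := 0
	return func(vote *helper.TestVote) bool {
		if vote.ID == voterID && dropped < n {
			dropped++
			return true
		}
		return false
	}
}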


@ -0,0 +1,734 @@
package integration
import (
"context"
"fmt"
"reflect"
"sync"
"testing"
"time"
"github.com/gammazero/workerpool"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/counters"
"source.quilibrium.com/quilibrium/monorepo/consensus/eventhandler"
"source.quilibrium.com/quilibrium/monorepo/consensus/forks"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/consensus/notifications"
"source.quilibrium.com/quilibrium/monorepo/consensus/notifications/pubsub"
"source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker"
"source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout"
"source.quilibrium.com/quilibrium/monorepo/consensus/safetyrules"
"source.quilibrium.com/quilibrium/monorepo/consensus/stateproducer"
"source.quilibrium.com/quilibrium/monorepo/consensus/timeoutaggregator"
"source.quilibrium.com/quilibrium/monorepo/consensus/timeoutcollector"
"source.quilibrium.com/quilibrium/monorepo/consensus/validator"
"source.quilibrium.com/quilibrium/monorepo/consensus/voteaggregator"
"source.quilibrium.com/quilibrium/monorepo/consensus/votecollector"
"source.quilibrium.com/quilibrium/monorepo/lifecycle"
"source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest"
)
type Instance struct {
// instance parameters
logger consensus.TraceLogger
participants []models.WeightedIdentity
localID models.Identity
dropVoteIn VoteFilter
dropVoteOut VoteFilter
dropPropIn ProposalFilter
dropPropOut ProposalFilter
dropTimeoutStateIn TimeoutStateFilter
dropTimeoutStateOut TimeoutStateFilter
stop Condition
// instance data
queue chan interface{}
updatingStates sync.RWMutex
headers map[models.Identity]*models.State[*helper.TestState]
pendings map[models.Identity]*models.SignedProposal[*helper.TestState, *helper.TestVote] // indexed by parent ID
// mocked dependencies
committee *mocks.DynamicCommittee
builder *mocks.LeaderProvider[*helper.TestState, *helper.TestPeer, *helper.TestCollected]
finalizer *mocks.Finalizer
persist *mocks.ConsensusStore[*helper.TestVote]
signer *mocks.Signer[*helper.TestState, *helper.TestVote]
verifier *mocks.Verifier[*helper.TestVote]
notifier *MockedCommunicatorConsumer
voting *mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]
// real dependencies
pacemaker consensus.Pacemaker
producer *stateproducer.StateProducer[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected]
forks *forks.Forks[*helper.TestState, *helper.TestVote]
voteAggregator *voteaggregator.VoteAggregator[*helper.TestState, *helper.TestVote]
timeoutAggregator *timeoutaggregator.TimeoutAggregator[*helper.TestVote]
safetyRules *safetyrules.SafetyRules[*helper.TestState, *helper.TestVote]
validator *validator.Validator[*helper.TestState, *helper.TestVote]
// main logic
handler *eventhandler.EventHandler[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected]
}
type MockedCommunicatorConsumer struct {
notifications.NoopProposalViolationConsumer[*helper.TestState, *helper.TestVote]
notifications.NoopParticipantConsumer[*helper.TestState, *helper.TestVote]
notifications.NoopFinalizationConsumer[*helper.TestState]
*mocks.CommunicatorConsumer[*helper.TestState, *helper.TestVote]
}
func NewMockedCommunicatorConsumer() *MockedCommunicatorConsumer {
return &MockedCommunicatorConsumer{
CommunicatorConsumer: &mocks.CommunicatorConsumer[*helper.TestState, *helper.TestVote]{},
}
}
var _ consensus.Consumer[*helper.TestState, *helper.TestVote] = (*MockedCommunicatorConsumer)(nil)
var _ consensus.TimeoutCollectorConsumer[*helper.TestVote] = (*Instance)(nil)
func NewInstance(t *testing.T, options ...Option) *Instance {
// generate random default identity
identity := helper.MakeIdentity()
// initialize the default configuration
cfg := Config{
Logger: helper.Logger(),
Root: DefaultRoot(),
Participants: []models.WeightedIdentity{&helper.TestWeightedIdentity{
ID: identity,
}},
LocalID: identity,
Timeouts: timeout.DefaultConfig,
IncomingVotes: DropNoVotes,
OutgoingVotes: DropNoVotes,
IncomingProposals: DropNoProposals,
OutgoingProposals: DropNoProposals,
IncomingTimeoutStates: DropNoTimeoutStates,
OutgoingTimeoutStates: DropNoTimeoutStates,
StopCondition: RightAway,
}
// apply the custom options
for _, option := range options {
option(&cfg)
}
// check the local ID is a participant
takesPart := false
for _, participant := range cfg.Participants {
if participant.Identity() == cfg.LocalID {
takesPart = true
break
}
}
require.True(t, takesPart)
// initialize the instance
in := Instance{
// instance parameters
logger: cfg.Logger,
participants: cfg.Participants,
localID: cfg.LocalID,
dropVoteIn: cfg.IncomingVotes,
dropVoteOut: cfg.OutgoingVotes,
dropPropIn: cfg.IncomingProposals,
dropPropOut: cfg.OutgoingProposals,
dropTimeoutStateIn: cfg.IncomingTimeoutStates,
dropTimeoutStateOut: cfg.OutgoingTimeoutStates,
stop: cfg.StopCondition,
// instance data
pendings: make(map[models.Identity]*models.SignedProposal[*helper.TestState, *helper.TestVote]),
headers: make(map[models.Identity]*models.State[*helper.TestState]),
queue: make(chan interface{}, 1024),
// instance mocks
committee: &mocks.DynamicCommittee{},
builder: &mocks.LeaderProvider[*helper.TestState, *helper.TestPeer, *helper.TestCollected]{},
persist: &mocks.ConsensusStore[*helper.TestVote]{},
signer: &mocks.Signer[*helper.TestState, *helper.TestVote]{},
verifier: &mocks.Verifier[*helper.TestVote]{},
notifier: NewMockedCommunicatorConsumer(),
finalizer: &mocks.Finalizer{},
voting: &mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]{},
}
// insert root state into headers register
in.headers[cfg.Root.Identifier] = cfg.Root
// program the hotstuff committee state
in.committee.On("IdentitiesByRank", mock.Anything).Return(
func(_ uint64) []models.WeightedIdentity {
return in.participants
},
nil,
)
in.committee.On("IdentitiesByState", mock.Anything).Return(
func(_ models.Identity) []models.WeightedIdentity {
return in.participants
},
nil,
)
for _, participant := range in.participants {
in.committee.On("IdentityByState", mock.Anything, participant.Identity()).Return(participant, nil)
in.committee.On("IdentityByRank", mock.Anything, participant.Identity()).Return(participant, nil)
}
in.committee.On("Self").Return(in.localID)
in.committee.On("LeaderForRank", mock.Anything).Return(
func(rank uint64) models.Identity {
return in.participants[int(rank)%len(in.participants)].Identity()
}, nil,
)
in.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(len(in.participants)*2000/3), nil)
in.committee.On("TimeoutThresholdForRank", mock.Anything).Return(uint64(len(in.participants)*2000/3), nil)
// program the builder module behaviour
in.builder.On("ProveNextState", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
func(ctx context.Context, rank uint64, filter []byte, parentID models.Identity) **helper.TestState {
in.updatingStates.Lock()
defer in.updatingStates.Unlock()
_, ok := in.headers[parentID]
if !ok {
return nil
}
s := &helper.TestState{
Rank: rank,
Signature: []byte{},
Timestamp: uint64(time.Now().UnixMilli()),
ID: helper.MakeIdentity(),
Prover: in.localID,
}
return &s
},
func(ctx context.Context, rank uint64, filter []byte, parentID models.Identity) error {
in.updatingStates.RLock()
_, ok := in.headers[parentID]
in.updatingStates.RUnlock()
if !ok {
return fmt.Errorf("parent state not found (parent: %x)", parentID)
}
return nil
},
)
// check on stop condition, stop the tests as soon as entering a certain rank
in.persist.On("PutConsensusState", mock.Anything).Return(nil)
in.persist.On("PutLivenessState", mock.Anything).Return(nil)
// program the hotstuff signer behaviour
in.signer.On("CreateVote", mock.Anything).Return(
func(state *models.State[*helper.TestState]) **helper.TestVote {
vote := &helper.TestVote{
Rank: state.Rank,
StateID: state.Identifier,
ID: in.localID,
Signature: make([]byte, 74),
}
return &vote
},
nil,
)
in.signer.On("CreateTimeout", mock.Anything, mock.Anything, mock.Anything).Return(
func(curRank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) *models.TimeoutState[*helper.TestVote] {
v := &helper.TestVote{
Rank: curRank,
Signature: make([]byte, 74),
Timestamp: uint64(time.Now().UnixMilli()),
ID: in.localID,
}
timeoutState := &models.TimeoutState[*helper.TestVote]{
Rank: curRank,
LatestQuorumCertificate: newestQC,
PriorRankTimeoutCertificate: previousRankTimeoutCert,
Vote: &v,
}
return timeoutState
},
nil,
)
in.signer.On("CreateQuorumCertificate", mock.Anything).Return(
func(votes []*helper.TestVote) models.QuorumCertificate {
voterIDs := make([]models.Identity, 0, len(votes))
bitmask := []byte{0, 0}
for i, vote := range votes {
bitmask[i/8] |= 1 << (i % 8)
voterIDs = append(voterIDs, vote.ID)
}
qc := &helper.TestQuorumCertificate{
Rank: votes[0].Rank,
FrameNumber: votes[0].Rank,
Selector: votes[0].StateID,
Timestamp: uint64(time.Now().UnixMilli()),
AggregatedSignature: &helper.TestAggregatedSignature{
Signature: make([]byte, 74),
Bitmask: bitmask,
PublicKey: make([]byte, 585),
},
}
return qc
},
nil,
)
// program the hotstuff verifier behaviour
in.verifier.On("VerifyVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
in.verifier.On("VerifyQuorumCertificate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
in.verifier.On("VerifyTimeoutCertificate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
// program the hotstuff communicator behaviour
in.notifier.CommunicatorConsumer.On("OnOwnProposal", mock.Anything, mock.Anything).Run(
func(args mock.Arguments) {
proposal, ok := args[0].(*models.SignedProposal[*helper.TestState, *helper.TestVote])
require.True(t, ok)
// sender should always have the parent
in.updatingStates.RLock()
_, exists := in.headers[proposal.State.ParentQuorumCertificate.Identity()]
in.updatingStates.RUnlock()
if !exists {
t.Fatalf("parent for proposal not found parent: %x", proposal.State.ParentQuorumCertificate.Identity())
}
// store locally and loop back to engine for processing
in.ProcessState(proposal)
},
)
in.notifier.CommunicatorConsumer.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) {
timeoutState, ok := args[0].(*models.TimeoutState[*helper.TestVote])
require.True(t, ok)
in.queue <- timeoutState
},
)
// in case of single node setup we should just forward vote to our own node
// for multi-node setup this method will be overridden
in.notifier.CommunicatorConsumer.On("OnOwnVote", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
vote, ok := args[0].(**helper.TestVote)
require.True(t, ok)
in.queue <- *vote
})
// program the finalizer module behaviour
in.finalizer.On("MakeFinal", mock.Anything).Return(
func(stateID models.Identity) error {
// as we don't use mocks to assert expectations, but only to
// simulate behaviour, we should drop the call data regularly
in.updatingStates.RLock()
state, found := in.headers[stateID]
in.updatingStates.RUnlock()
if !found {
return fmt.Errorf("can't broadcast with unknown parent")
}
if state.Rank%100 == 0 {
in.committee.Calls = nil
in.builder.Calls = nil
in.signer.Calls = nil
in.verifier.Calls = nil
in.notifier.CommunicatorConsumer.Calls = nil
in.finalizer.Calls = nil
}
return nil
},
)
// initialize error handling and logging
var err error
notifier := pubsub.NewDistributor[*helper.TestState, *helper.TestVote]()
notifier.AddConsumer(in.notifier)
logConsumer := notifications.NewLogConsumer[*helper.TestState, *helper.TestVote](in.logger)
notifier.AddConsumer(logConsumer)
// initialize the finalizer
var rootState *models.State[*helper.TestState]
if cfg.Root.ParentQuorumCertificate != nil {
rootState = models.StateFrom(cfg.Root.State, cfg.Root.ParentQuorumCertificate)
} else {
rootState = models.GenesisStateFrom(cfg.Root.State)
}
rootQC := &helper.TestQuorumCertificate{
Rank: rootState.Rank,
FrameNumber: rootState.Rank,
Selector: rootState.Identifier,
Timestamp: uint64(time.Now().UnixMilli()),
AggregatedSignature: &helper.TestAggregatedSignature{
Signature: make([]byte, 74),
Bitmask: []byte{0b11111111, 0b00000000},
PublicKey: make([]byte, 585),
},
}
certifiedRootState, err := models.NewCertifiedState(rootState, rootQC)
require.NoError(t, err)
livenessData := &models.LivenessState{
CurrentRank: rootQC.Rank + 1,
LatestQuorumCertificate: rootQC,
}
in.persist.On("GetLivenessState", mock.Anything).Return(livenessData, nil).Once()
// initialize the pacemaker
controller := timeout.NewController(cfg.Timeouts)
in.pacemaker, err = pacemaker.NewPacemaker[*helper.TestState, *helper.TestVote](nil, controller, pacemaker.NoProposalDelay(), notifier, in.persist, in.logger)
require.NoError(t, err)
// initialize the forks handler
in.forks, err = forks.NewForks(certifiedRootState, in.finalizer, notifier)
require.NoError(t, err)
// initialize the validator
in.validator = validator.NewValidator[*helper.TestState, *helper.TestVote](in.committee, in.verifier)
packer := &mocks.Packer{}
packer.On("Pack", mock.Anything, mock.Anything).Return(
func(rank uint64, sig *consensus.StateSignatureData) ([]byte, []byte, error) {
indices := []byte{0, 0}
for i := range sig.Signers {
indices[i/8] |= 1 << (i % 8)
}
return indices, make([]byte, 74), nil
},
).Maybe()
onQCCreated := func(qc models.QuorumCertificate) {
in.queue <- qc
}
voteProcessorFactory := mocks.NewVoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)
voteProcessorFactory.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
func(tracer consensus.TraceLogger, filter []byte, proposal *models.SignedProposal[*helper.TestState, *helper.TestVote], dsTag []byte, aggregator consensus.SignatureAggregator, votingProvider consensus.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]) consensus.VerifyingVoteProcessor[*helper.TestState, *helper.TestVote] {
processor, err := votecollector.NewBootstrapVoteProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](
in.logger,
filter,
in.committee,
proposal.State,
onQCCreated,
[]byte{},
aggregator,
in.voting,
)
require.NoError(t, err)
vote, err := proposal.ProposerVote()
require.NoError(t, err)
err = processor.Process(vote)
if err != nil {
t.Fatalf("invalid vote for own proposal: %v", err)
}
return processor
}, nil).Maybe()
in.voting.On("FinalizeQuorumCertificate", mock.Anything, mock.Anything, mock.Anything).Return(
func(
ctx context.Context,
state *models.State[*helper.TestState],
aggregatedSignature models.AggregatedSignature,
) (models.QuorumCertificate, error) {
return &helper.TestQuorumCertificate{
Rank: state.Rank,
Timestamp: state.Timestamp,
FrameNumber: state.Rank,
Selector: state.Identifier,
AggregatedSignature: aggregatedSignature,
}, nil
},
)
in.voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
func(ctx context.Context, rank uint64, latestQuorumCertificate models.QuorumCertificate, latestQuorumCertificateRanks []uint64, aggregatedSignature models.AggregatedSignature) (models.TimeoutCertificate, error) {
return &helper.TestTimeoutCertificate{
Filter: nil,
Rank: rank,
LatestRanks: latestQuorumCertificateRanks,
LatestQuorumCert: latestQuorumCertificate,
AggregatedSignature: aggregatedSignature,
}, nil
},
)
voteAggregationDistributor := pubsub.NewVoteAggregationDistributor[*helper.TestState, *helper.TestVote]()
sigAgg := mocks.NewSignatureAggregator(t)
sigAgg.On("Aggregate", mock.Anything, mock.Anything).Return(
func(publicKeys [][]byte, signatures [][]byte) (models.AggregatedSignature, error) {
bitmask := []byte{0, 0}
for i := range publicKeys {
bitmask[i/8] |= 1 << (i % 8)
}
return &helper.TestAggregatedSignature{
Signature: make([]byte, 74),
Bitmask: bitmask,
PublicKey: make([]byte, 585),
}, nil
}).Maybe()
sigAgg.On("VerifySignatureRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe()
createCollectorFactoryMethod := votecollector.NewStateMachineFactory(in.logger, []byte{}, voteAggregationDistributor, voteProcessorFactory.Create, []byte{}, sigAgg, in.voting)
voteCollectors := voteaggregator.NewVoteCollectors[*helper.TestState, *helper.TestVote](in.logger, livenessData.CurrentRank, workerpool.New(2), createCollectorFactoryMethod)
// initialize the vote aggregator
in.voteAggregator, err = voteaggregator.NewVoteAggregator[*helper.TestState, *helper.TestVote](
in.logger,
voteAggregationDistributor,
livenessData.CurrentRank,
voteCollectors,
)
require.NoError(t, err)
// initialize factories for timeout collector and timeout processor
timeoutAggregationDistributor := pubsub.NewTimeoutAggregationDistributor[*helper.TestVote]()
timeoutProcessorFactory := mocks.NewTimeoutProcessorFactory[*helper.TestVote](t)
timeoutProcessorFactory.On("Create", mock.Anything).Return(
func(rank uint64) consensus.TimeoutProcessor[*helper.TestVote] {
// mock signature aggregator which doesn't perform any crypto operations and just tracks total weight
aggregator := &mocks.TimeoutSignatureAggregator{}
totalWeight := atomic.NewUint64(0)
newestRank := counters.NewMonotonicCounter(0)
bits := counters.NewMonotonicCounter(0)
aggregator.On("Rank").Return(rank).Maybe()
aggregator.On("TotalWeight").Return(func() uint64 {
return totalWeight.Load()
}).Maybe()
aggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).Return(
func(signerID models.Identity, _ []byte, newestQCRank uint64) uint64 {
newestRank.Set(newestQCRank)
var signer models.WeightedIdentity
for _, p := range in.participants {
if p.Identity() == signerID {
signer = p
}
}
require.NotNil(t, signer)
bits.Increment()
return totalWeight.Add(signer.Weight())
}, nil,
).Maybe()
aggregator.On("Aggregate").Return(
func() []consensus.TimeoutSignerInfo {
signersData := make([]consensus.TimeoutSignerInfo, 0, len(in.participants))
newestQCRank := newestRank.Value()
for _, signer := range in.participants {
signersData = append(signersData, consensus.TimeoutSignerInfo{
NewestQCRank: newestQCRank,
Signer: signer.Identity(),
})
}
return signersData
},
func() models.AggregatedSignature {
bitCount := bits.Value()
bitmask := []byte{0, 0}
for i := range bitCount {
pos := i / 8
bitmask[pos] |= 1 << (i % 8)
}
return &helper.TestAggregatedSignature{
Signature: make([]byte, 74),
Bitmask: bitmask,
PublicKey: make([]byte, 585),
}
},
nil,
).Maybe()
p, err := timeoutcollector.NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](
in.logger,
in.committee,
in.validator,
aggregator,
timeoutAggregationDistributor,
in.voting,
)
require.NoError(t, err)
return p
}, nil).Maybe()
timeoutCollectorFactory := timeoutcollector.NewTimeoutCollectorFactory(
in.logger,
timeoutAggregationDistributor,
timeoutProcessorFactory,
)
timeoutCollectors := timeoutaggregator.NewTimeoutCollectors(
in.logger,
livenessData.CurrentRank,
timeoutCollectorFactory,
)
// initialize the timeout aggregator
in.timeoutAggregator, err = timeoutaggregator.NewTimeoutAggregator(
in.logger,
livenessData.CurrentRank,
timeoutCollectors,
)
require.NoError(t, err)
safetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: rootState.Rank,
LatestAcknowledgedRank: rootState.Rank,
}
in.persist.On("GetConsensusState", mock.Anything).Return(safetyData, nil).Once()
// initialize the safety rules
in.safetyRules, err = safetyrules.NewSafetyRules(nil, in.signer, in.persist, in.committee)
require.NoError(t, err)
// initialize the state producer
in.producer, err = stateproducer.NewStateProducer[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected](in.safetyRules, in.committee, in.builder)
require.NoError(t, err)
// initialize the event handler
in.handler, err = eventhandler.NewEventHandler[*helper.TestState, *helper.TestVote, *helper.TestPeer, *helper.TestCollected](
in.pacemaker,
in.producer,
in.forks,
in.persist,
in.committee,
in.safetyRules,
notifier,
in.logger,
)
require.NoError(t, err)
timeoutAggregationDistributor.AddTimeoutCollectorConsumer(logConsumer)
timeoutAggregationDistributor.AddTimeoutCollectorConsumer(&in)
voteAggregationDistributor.AddVoteCollectorConsumer(logConsumer)
return &in
}
func (in *Instance) Run(t *testing.T) error {
ctx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
<-lifecycle.AllDone(in.voteAggregator, in.timeoutAggregator)
}()
signalerCtx := unittest.NewMockSignalerContext(t, ctx)
in.voteAggregator.Start(signalerCtx)
in.timeoutAggregator.Start(signalerCtx)
<-lifecycle.AllReady(in.voteAggregator, in.timeoutAggregator)
// start the event handler
err := in.handler.Start(ctx)
if err != nil {
return fmt.Errorf("could not start event handler: %w", err)
}
// run until an error or stop condition is reached
for {
// check on stop conditions
if in.stop(in) {
return errStopCondition
}
// we handle timeouts with priority
select {
case <-in.handler.TimeoutChannel():
err := in.handler.OnLocalTimeout()
if err != nil {
panic(fmt.Errorf("could not process timeout: %w", err))
}
default:
}
// check on stop conditions
if in.stop(in) {
return errStopCondition
}
// otherwise, process first received event
select {
case <-in.handler.TimeoutChannel():
err := in.handler.OnLocalTimeout()
if err != nil {
return fmt.Errorf("could not process timeout: %w", err)
}
case msg := <-in.queue:
switch m := msg.(type) {
case *models.SignedProposal[*helper.TestState, *helper.TestVote]:
// add state to aggregator
in.voteAggregator.AddState(m)
// then pass to event handler
err := in.handler.OnReceiveProposal(m)
if err != nil {
return fmt.Errorf("could not process proposal: %w", err)
}
case *helper.TestVote:
in.voteAggregator.AddVote(&m)
case *models.TimeoutState[*helper.TestVote]:
in.timeoutAggregator.AddTimeout(m)
case models.QuorumCertificate:
err := in.handler.OnReceiveQuorumCertificate(m)
if err != nil {
return fmt.Errorf("could not process received QC: %w", err)
}
case models.TimeoutCertificate:
err := in.handler.OnReceiveTimeoutCertificate(m)
if err != nil {
return fmt.Errorf("could not process received TC: %w", err)
}
case *consensus.PartialTimeoutCertificateCreated:
err := in.handler.OnPartialTimeoutCertificateCreated(m)
if err != nil {
return fmt.Errorf("could not process partial TC: %w", err)
}
default:
fmt.Printf("unhandled queue event: %s\n", reflect.ValueOf(msg).Type().String())
}
}
}
}
func (in *Instance) ProcessState(proposal *models.SignedProposal[*helper.TestState, *helper.TestVote]) {
in.updatingStates.Lock()
defer in.updatingStates.Unlock()
_, parentExists := in.headers[proposal.State.ParentQuorumCertificate.Identity()]
if parentExists {
next := proposal
for next != nil {
in.headers[next.State.Identifier] = next.State
in.queue <- next
// keep processing the pending states
next = in.pendings[next.State.ParentQuorumCertificate.Identity()]
}
} else {
// cache it in pendings by ParentID
in.pendings[proposal.State.ParentQuorumCertificate.Identity()] = proposal
}
}
func (in *Instance) OnTimeoutCertificateConstructedFromTimeouts(tc models.TimeoutCertificate) {
in.queue <- tc
}
func (in *Instance) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) {
in.queue <- &consensus.PartialTimeoutCertificateCreated{
Rank: rank,
NewestQuorumCertificate: newestQC,
PriorRankTimeoutCertificate: previousRankTimeoutCert,
}
}
func (in *Instance) OnNewQuorumCertificateDiscovered(qc models.QuorumCertificate) {
in.queue <- qc
}
func (in *Instance) OnNewTimeoutCertificateDiscovered(tc models.TimeoutCertificate) {
in.queue <- tc
}
func (in *Instance) OnTimeoutProcessed(*models.TimeoutState[*helper.TestVote]) {
}


@ -0,0 +1,153 @@
package integration
import (
"errors"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
)
// a pacemaker timeout to wait for proposals. Usually 10 ms is enough,
// but for slow environments like CI, a longer one is needed.
const safeTimeout = 2 * time.Second
// number of failed rounds before first timeout increase
const happyPathMaxRoundFailures = 6
func TestSingleInstance(t *testing.T) {
fmt.Println("starting single instance test")
// set up a single instance to run
finalRank := uint64(10)
in := NewInstance(t,
WithStopCondition(RankFinalized(finalRank)),
)
// run the event handler until we reach a stop condition
err := in.Run(t)
require.ErrorIs(t, err, errStopCondition, "should run until stop condition")
// check if forks and pacemaker are in expected rank state
assert.Equal(t, finalRank, in.forks.FinalizedRank(), "should have finalized up to the stop condition's target rank")
fmt.Println("ending single instance test")
}
func TestThreeInstances(t *testing.T) {
fmt.Println("starting three instance test")
// test parameters
num := 3
finalRank := uint64(100)
// generate three hotstuff participants
participants := helper.WithWeightedIdentityList(num)
root := DefaultRoot()
// set up three instances that are exactly the same
// since we don't drop any happy-path messages, there is enough data to advance
// without timeouts; for that reason we drop all timeout-related (TO) communication.
instances := make([]*Instance, 0, num)
for n := 0; n < num; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithStopCondition(RankFinalized(finalRank)),
WithIncomingTimeoutStates(DropAllTimeoutStates),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start the instances and wait for them to finish
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.True(t, errors.Is(err, errStopCondition), "should run until stop condition")
wg.Done()
}(in)
}
wg.Wait()
// check that all instances have the same finalized state
in1 := instances[0]
in2 := instances[1]
in3 := instances[2]
// verify progress has been made
assert.GreaterOrEqual(t, in1.forks.FinalizedState().Rank, finalRank, "the first instance's finalized rank should have reached the target rank")
// verify same progresses have been made
assert.Equal(t, in1.forks.FinalizedState(), in2.forks.FinalizedState(), "second instance should have same finalized state as first instance")
assert.Equal(t, in1.forks.FinalizedState(), in3.forks.FinalizedState(), "third instance should have same finalized state as first instance")
assert.Equal(t, FinalizedRanks(in1), FinalizedRanks(in2))
assert.Equal(t, FinalizedRanks(in1), FinalizedRanks(in3))
fmt.Println("ending three instance test")
}
func TestSevenInstances(t *testing.T) {
fmt.Println("starting seven instance test")
// test parameters
numPass := 5
numFail := 2
finalRank := uint64(30)
// generate the seven hotstuff participants
participants := helper.WithWeightedIdentityList(numPass + numFail)
instances := make([]*Instance, 0, numPass+numFail)
root := DefaultRoot()
// set up five instances that work fully
for n := 0; n < numPass; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithStopCondition(RankFinalized(finalRank)),
)
instances = append(instances, in)
}
// set up two instances which can't vote
for n := numPass; n < numPass+numFail; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithStopCondition(RankFinalized(finalRank)),
WithOutgoingVotes(DropAllVotes),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start all seven instances and wait for them to wrap up
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.True(t, errors.Is(err, errStopCondition), "should run until stop condition")
wg.Done()
}(in)
}
wg.Wait()
// check that all instances have the same finalized state
ref := instances[0]
assert.Less(t, finalRank-uint64(2*numPass+numFail), ref.forks.FinalizedState().Rank, "expect instance 0 to have made enough progress, but it didn't")
finalizedRanks := FinalizedRanks(ref)
for i := 1; i < numPass; i++ {
assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized ranks as first instance", i)
}
fmt.Println("ending seven instance test")
}


@ -0,0 +1,422 @@
package integration
import (
"encoding/hex"
"errors"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout"
"source.quilibrium.com/quilibrium/monorepo/lifecycle/unittest"
)
// pacemaker timeout
// if your laptop is fast enough, 10 ms is enough
const pmTimeout = 100 * time.Millisecond
// maxTimeoutRebroadcast specifies how often the PaceMaker rebroadcasts
// its timeout state in case there is no progress. We keep the value
// small to keep latency low.
const maxTimeoutRebroadcast = 1 * time.Second
// If 2 nodes are down in a 7-node cluster, the remaining 5 nodes can
// still make progress and reach consensus
func Test2TimeoutOutof7Instances(t *testing.T) {
healthyReplicas := 5
notVotingReplicas := 2
finalRank := uint64(30)
// generate the seven hotstuff participants
participants := helper.WithWeightedIdentityList(healthyReplicas + notVotingReplicas)
instances := make([]*Instance, 0, healthyReplicas+notVotingReplicas)
root := DefaultRoot()
timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast)
require.NoError(t, err)
// set up five instances that work fully
for n := 0; n < healthyReplicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithTimeouts(timeouts),
WithBufferLogger(),
WithLocalID(participants[n].Identity()),
WithLoggerParams(consensus.StringParam("status", "healthy")),
WithStopCondition(RankFinalized(finalRank)),
)
instances = append(instances, in)
}
// set up two instances which can neither vote nor propose
for n := healthyReplicas; n < healthyReplicas+notVotingReplicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithTimeouts(timeouts),
WithBufferLogger(),
WithLocalID(participants[n].Identity()),
WithLoggerParams(consensus.StringParam("status", "unhealthy")),
WithStopCondition(RankFinalized(finalRank)),
WithOutgoingVotes(DropAllVotes),
WithOutgoingProposals(DropAllProposals),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start all seven instances and wait for them to wrap up
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.ErrorIs(t, err, errStopCondition)
wg.Done()
}(in)
}
unittest.AssertReturnsBefore(t, wg.Wait, 20*time.Second, "expect to finish before timeout")
for i, in := range instances {
fmt.Println("=============================================================================")
fmt.Println("INSTANCE", i, "-", hex.EncodeToString([]byte(in.localID)))
fmt.Println("=============================================================================")
in.logger.(*helper.BufferLog).Flush()
}
// check that all instances have the same finalized state
ref := instances[0]
assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expected instance 0 to make enough progress, but it didn't")
finalizedRanks := FinalizedRanks(ref)
for i := 1; i < healthyReplicas; i++ {
assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized ranks as first instance", i)
}
}
// 2 nodes in a 4-node cluster are configured to send only timeout messages (no voting or proposing).
// The other 2 unconstrained nodes should be able to make progress through the recovery path by creating TCs
// for every round, but no state will be finalized, because finalization requires a direct 1-chain and a QC.
func Test2TimeoutOutof4Instances(t *testing.T) {
healthyReplicas := 2
replicasDroppingHappyPathMsgs := 2
finalRank := uint64(30)
// generate the 4 hotstuff participants
participants := helper.WithWeightedIdentityList(healthyReplicas + replicasDroppingHappyPathMsgs)
instances := make([]*Instance, 0, healthyReplicas+replicasDroppingHappyPathMsgs)
root := DefaultRoot()
timeouts, err := timeout.NewConfig(10*time.Millisecond, 50*time.Millisecond, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast)
require.NoError(t, err)
// set up two instances that work fully
for n := 0; n < healthyReplicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithLoggerParams(consensus.StringParam("status", "healthy")),
WithStopCondition(RankReached(finalRank)),
)
instances = append(instances, in)
}
// set up instances which can neither vote nor propose
for n := healthyReplicas; n < healthyReplicas+replicasDroppingHappyPathMsgs; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithLoggerParams(consensus.StringParam("status", "unhealthy")),
WithStopCondition(RankReached(finalRank)),
WithOutgoingVotes(DropAllVotes),
WithIncomingVotes(DropAllVotes),
WithOutgoingProposals(DropAllProposals),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start the instances and wait for them to finish
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.ErrorIs(t, err, errStopCondition, "should run until stop condition")
wg.Done()
}(in)
}
unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second, "expect to finish before timeout")
// check that all instances have the same finalized state
ref := instances[0]
finalizedRanks := FinalizedRanks(ref)
assert.Equal(t, []uint64{0}, finalizedRanks, "no rank was finalized, because finalization requires a direct 2-chain plus a QC, which never happens in this case")
assert.Equal(t, finalRank, ref.pacemaker.CurrentRank(), "expected instance 0 to make enough progress, but it didn't")
for i := 1; i < healthyReplicas; i++ {
assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized rank as first instance", i)
assert.Equal(t, finalRank, instances[i].pacemaker.CurrentRank(), "instance %d should have same active rank as first instance", i)
}
}
// If 1 node is down in a 5-node cluster, the remaining 4 nodes can
// make progress and reach consensus
func Test1TimeoutOutof5Instances(t *testing.T) {
healthyReplicas := 4
downReplicas := 1
finalRank := uint64(30)
// generate the five hotstuff participants
participants := helper.WithWeightedIdentityList(healthyReplicas + downReplicas)
instances := make([]*Instance, 0, healthyReplicas+downReplicas)
root := DefaultRoot()
timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast)
require.NoError(t, err)
// set up instances that work fully
for n := 0; n < healthyReplicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithLoggerParams(consensus.StringParam("status", "healthy")),
WithStopCondition(RankFinalized(finalRank)),
)
instances = append(instances, in)
}
// set up one instance which can neither vote nor propose
for n := healthyReplicas; n < healthyReplicas+downReplicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithLoggerParams(consensus.StringParam("status", "unhealthy")),
WithStopCondition(RankReached(finalRank)),
WithOutgoingVotes(DropAllVotes),
WithOutgoingProposals(DropAllProposals),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start all five instances and wait for them to wrap up
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.ErrorIs(t, err, errStopCondition)
wg.Done()
}(in)
}
success := unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second, "expect to finish before timeout")
if !success {
t.Logf("dumping state of system:")
for i, inst := range instances {
t.Logf(
"instance %d: %d %d %d",
i,
inst.pacemaker.CurrentRank(),
inst.pacemaker.LatestQuorumCertificate().GetRank(),
inst.forks.FinalizedState().Rank,
)
}
}
// check that all instances have the same finalized state
ref := instances[0]
finalizedRanks := FinalizedRanks(ref)
assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expected instance 0 to make enough progress, but it didn't")
for i := 1; i < healthyReplicas; i++ {
assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized ranks as first instance", i)
}
}
// TestStateDelayIsHigherThanTimeout tests a protocol edge case, where
// - The state arrives in time for replicas to vote.
// - The next primary does not respond in time with a follow-up proposal,
// so nodes start sending TimeoutStates.
// - However, eventually, the next primary successfully constructs a QC and a new
// state before a TC leads to the round timing out.
//
// This test verifies that nodes still make progress on the happy path (QC constructed),
// despite already having initiated the timeout.
// Example scenarios, how this timing edge case could manifest:
// - state delay is very close to (or larger than) the round duration
// - delayed message transmission (specifically votes) within network
// - overwhelmed / slowed-down primary
// - byzantine primary
//
// Implementation:
// - We have 4 nodes in total where the TimeoutStates from two of them are always
// discarded. Therefore, no TC can be constructed.
// - To force nodes to initiate the timeout (i.e. send TimeoutStates), we set
// the `stateRateDelay` to _twice_ the PaceMaker Timeout. Furthermore, we configure
// the PaceMaker to only increase timeout duration after 6 successive round failures.
func TestStateDelayIsHigherThanTimeout(t *testing.T) {
healthyReplicas := 2
replicasNotGeneratingTimeouts := 2
finalRank := uint64(20)
// generate the 4 hotstuff participants
participants := helper.WithWeightedIdentityList(healthyReplicas + replicasNotGeneratingTimeouts)
instances := make([]*Instance, 0, healthyReplicas+replicasNotGeneratingTimeouts)
root := DefaultRoot()
timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast)
require.NoError(t, err)
// set up 2 instances that fully work (incl. sending TimeoutStates)
for n := 0; n < healthyReplicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithStopCondition(RankFinalized(finalRank)),
)
instances = append(instances, in)
}
// set up two instances which neither generate nor receive timeout states
for n := healthyReplicas; n < healthyReplicas+replicasNotGeneratingTimeouts; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithStopCondition(RankFinalized(finalRank)),
WithIncomingTimeoutStates(DropAllTimeoutStates),
WithOutgoingTimeoutStates(DropAllTimeoutStates),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start all 4 instances and wait for them to wrap up
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.ErrorIs(t, err, errStopCondition)
wg.Done()
}(in)
}
unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second, "expect to finish before timeout")
// check that all instances have the same finalized state
ref := instances[0]
assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expected instance 0 to make enough progress, but it didn't")
finalizedRanks := FinalizedRanks(ref)
// in this test we rely on a QC being produced in each rank
// make sure the finalized ranks are consecutive, with no gaps
for i := 1; i < len(finalizedRanks); i++ {
// finalized ranks are sorted in descending order
if finalizedRanks[i-1] != finalizedRanks[i]+1 {
t.Fatalf("finalized ranks series has gap, this is not expected: %v", finalizedRanks)
return
}
}
for i := 1; i < healthyReplicas; i++ {
assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized ranks as first instance", i)
}
}
// TestAsyncClusterStartup tests a realistic scenario where nodes are started asynchronously:
// - Replicas are started in sequential order.
// - Each replica skips voting for the first state (emulating message omission).
// - Each replica skips the first TimeoutState (emulating message omission).
// - At this point the protocol loses liveness unless a timeout rebroadcast happens from a super-majority of replicas.
//
// This test verifies that nodes still make progress, despite the first timeout messages being lost.
// Implementation:
// - We have 4 replicas in total; each of them skips voting for the first rank to force a timeout.
// - TimeoutStates are dropped for the whole committee until each replica has generated its first timeout.
// - After each replica has generated a timeout, subsequent timeout rebroadcasts are let through so progress can resume.
func TestAsyncClusterStartup(t *testing.T) {
replicas := 4
finalRank := uint64(20)
// generate the four hotstuff participants
participants := helper.WithWeightedIdentityList(replicas)
instances := make([]*Instance, 0, replicas)
root := DefaultRoot()
timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, 6, maxTimeoutRebroadcast)
require.NoError(t, err)
// set up instances that work fully
var lock sync.Mutex
timeoutStateGenerated := make(map[models.Identity]struct{})
for n := 0; n < replicas; n++ {
in := NewInstance(t,
WithRoot(root),
WithParticipants(participants),
WithLocalID(participants[n].Identity()),
WithTimeouts(timeouts),
WithStopCondition(RankFinalized(finalRank)),
WithOutgoingVotes(func(vote *helper.TestVote) bool {
return vote.Rank == 1
}),
WithOutgoingTimeoutStates(func(object *models.TimeoutState[*helper.TestVote]) bool {
lock.Lock()
defer lock.Unlock()
timeoutStateGenerated[(*object.Vote).ID] = struct{}{}
// start allowing timeouts once every node has generated one;
// when nodes rebroadcast, the timeout will go through
return len(timeoutStateGenerated) != replicas
}),
)
instances = append(instances, in)
}
// connect the communicators of the instances together
Connect(t, instances)
// start each node only after previous one has started
var wg sync.WaitGroup
for _, in := range instances {
wg.Add(1)
go func(in *Instance) {
err := in.Run(t)
require.ErrorIs(t, err, errStopCondition)
wg.Done()
}(in)
}
unittest.AssertReturnsBefore(t, wg.Wait, 20*time.Second, "expect to finish before timeout")
// check that all instances have the same finalized state
ref := instances[0]
assert.Equal(t, finalRank, ref.forks.FinalizedState().Rank, "expected instance 0 to make enough progress, but it didn't")
finalizedRanks := FinalizedRanks(ref)
for i := 1; i < replicas; i++ {
assert.Equal(t, ref.forks.FinalizedState(), instances[i].forks.FinalizedState(), "instance %d should have same finalized state as first instance", i)
assert.Equal(t, finalizedRanks, FinalizedRanks(instances[i]), "instance %d should have same finalized ranks as first instance", i)
}
}

@@ -0,0 +1,109 @@
package integration
import (
"errors"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
"source.quilibrium.com/quilibrium/monorepo/consensus/pacemaker/timeout"
)
var errStopCondition = errors.New("stop condition reached")
type Option func(*Config)
type Config struct {
Logger consensus.TraceLogger
Root *models.State[*helper.TestState]
Participants []models.WeightedIdentity
LocalID models.Identity
Timeouts timeout.Config
IncomingVotes VoteFilter
OutgoingVotes VoteFilter
IncomingTimeoutStates TimeoutStateFilter
OutgoingTimeoutStates TimeoutStateFilter
IncomingProposals ProposalFilter
OutgoingProposals ProposalFilter
StopCondition Condition
}
func WithRoot(root *models.State[*helper.TestState]) Option {
return func(cfg *Config) {
cfg.Root = root
}
}
func WithParticipants(participants []models.WeightedIdentity) Option {
return func(cfg *Config) {
cfg.Participants = participants
}
}
func WithLocalID(localID models.Identity) Option {
return func(cfg *Config) {
cfg.LocalID = localID
cfg.Logger = cfg.Logger.With(consensus.IdentityParam("self", localID))
}
}
func WithTimeouts(timeouts timeout.Config) Option {
return func(cfg *Config) {
cfg.Timeouts = timeouts
}
}
func WithBufferLogger() Option {
return func(cfg *Config) {
cfg.Logger = helper.BufferLogger()
}
}
func WithLoggerParams(params ...consensus.LogParam) Option {
return func(cfg *Config) {
cfg.Logger = cfg.Logger.With(params...)
}
}
func WithIncomingVotes(filter VoteFilter) Option {
return func(cfg *Config) {
cfg.IncomingVotes = filter
}
}
func WithOutgoingVotes(filter VoteFilter) Option {
return func(cfg *Config) {
cfg.OutgoingVotes = filter
}
}
func WithIncomingProposals(filter ProposalFilter) Option {
return func(cfg *Config) {
cfg.IncomingProposals = filter
}
}
func WithOutgoingProposals(filter ProposalFilter) Option {
return func(cfg *Config) {
cfg.OutgoingProposals = filter
}
}
func WithIncomingTimeoutStates(filter TimeoutStateFilter) Option {
return func(cfg *Config) {
cfg.IncomingTimeoutStates = filter
}
}
func WithOutgoingTimeoutStates(filter TimeoutStateFilter) Option {
return func(cfg *Config) {
cfg.OutgoingTimeoutStates = filter
}
}
func WithStopCondition(stop Condition) Option {
return func(cfg *Config) {
cfg.StopCondition = stop
}
}
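// Usage sketch (illustrative, mirroring the tests above): options are applied
// in order over a single Config, so WithLocalID, which decorates cfg.Logger,
// relies on a logger already being present (presumably the default one
// NewInstance seeds before applying options, or an earlier WithBufferLogger):
//
//	participants := helper.WithWeightedIdentityList(4)
//	in := NewInstance(t,
//		WithRoot(DefaultRoot()),
//		WithParticipants(participants),
//		WithBufferLogger(),                      // set the logger first...
//		WithLocalID(participants[0].Identity()), // ...so it can be decorated
//		WithStopCondition(RankFinalized(10)),
//	)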

@@ -0,0 +1,48 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
time "time"
"github.com/stretchr/testify/mock"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// CommunicatorConsumer is an autogenerated mock type for the CommunicatorConsumer type
type CommunicatorConsumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime
func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnProposal(proposal *models.SignedProposal[StateT, VoteT], targetPublicationTime time.Time) {
_m.Called(proposal, targetPublicationTime)
}
// OnOwnTimeout provides a mock function with given fields: timeout
func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnTimeout(timeout *models.TimeoutState[VoteT]) {
_m.Called(timeout)
}
// OnOwnVote provides a mock function with given fields: vote, recipientID
func (_m *CommunicatorConsumer[StateT, VoteT]) OnOwnVote(vote *VoteT, recipientID models.Identity) {
_m.Called(vote, recipientID)
}
// NewCommunicatorConsumer creates a new instance of CommunicatorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewCommunicatorConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *CommunicatorConsumer[StateT, VoteT] {
mock := &CommunicatorConsumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
var _ consensus.CommunicatorConsumer[*helper.TestState, *helper.TestVote] = (*CommunicatorConsumer[*helper.TestState, *helper.TestVote])(nil)
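// Usage sketch (hypothetical, for illustration): all three methods return
// nothing, so a test only registers expectations with On(...); no Return is
// needed, and AssertExpectations fires automatically through the t.Cleanup
// hook installed by the constructor:
//
//	comms := NewCommunicatorConsumer[*helper.TestState, *helper.TestVote](t)
//	comms.On("OnOwnVote", mock.Anything, mock.Anything)
//	// ... run code under test that should broadcast its own vote ...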

@@ -0,0 +1,123 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// ConsensusStore is an autogenerated mock type for the ConsensusStore type
type ConsensusStore[VoteT models.Unique] struct {
mock.Mock
}
// GetConsensusState provides a mock function with given fields: filter
func (_m *ConsensusStore[VoteT]) GetConsensusState(filter []byte) (*models.ConsensusState[VoteT], error) {
ret := _m.Called(filter)
if len(ret) == 0 {
panic("no return value specified for GetConsensusState")
}
var r0 *models.ConsensusState[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(filter []byte) (*models.ConsensusState[VoteT], error)); ok {
return rf(filter)
}
if rf, ok := ret.Get(0).(func(filter []byte) *models.ConsensusState[VoteT]); ok {
r0 = rf(filter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.ConsensusState[VoteT])
}
}
if rf, ok := ret.Get(1).(func(filter []byte) error); ok {
r1 = rf(filter)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetLivenessState provides a mock function with given fields: filter
func (_m *ConsensusStore[VoteT]) GetLivenessState(filter []byte) (*models.LivenessState, error) {
ret := _m.Called(filter)
if len(ret) == 0 {
panic("no return value specified for GetLivenessState")
}
var r0 *models.LivenessState
var r1 error
if rf, ok := ret.Get(0).(func(filter []byte) (*models.LivenessState, error)); ok {
return rf(filter)
}
if rf, ok := ret.Get(0).(func(filter []byte) *models.LivenessState); ok {
r0 = rf(filter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.LivenessState)
}
}
if rf, ok := ret.Get(1).(func(filter []byte) error); ok {
r1 = rf(filter)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// PutConsensusState provides a mock function with given fields: state
func (_m *ConsensusStore[VoteT]) PutConsensusState(state *models.ConsensusState[VoteT]) error {
ret := _m.Called(state)
if len(ret) == 0 {
panic("no return value specified for PutConsensusState")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.ConsensusState[VoteT]) error); ok {
r0 = rf(state)
} else {
r0 = ret.Error(0)
}
return r0
}
// PutLivenessState provides a mock function with given fields: state
func (_m *ConsensusStore[VoteT]) PutLivenessState(state *models.LivenessState) error {
ret := _m.Called(state)
if len(ret) == 0 {
panic("no return value specified for PutLivenessState")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.LivenessState) error); ok {
r0 = rf(state)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewConsensusStore creates a new instance of ConsensusStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewConsensusStore[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *ConsensusStore[VoteT] {
mock := &ConsensusStore[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
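// Usage sketch (hypothetical, for illustration): return values may be fixed,
//
//	store := NewConsensusStore[*helper.TestVote](t)
//	store.On("GetConsensusState", mock.Anything).Return(nil, errNotFound) // errNotFound: hypothetical
//
// or computed per call by handing Return a function with the matching
// signature, which the wrappers above detect via their type switches:
//
//	store.On("GetLivenessState", mock.Anything).Return(
//		func(filter []byte) (*models.LivenessState, error) {
//			return livenessFixture, nil // livenessFixture: hypothetical test value
//		})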

consensus/mocks/consumer.go
@@ -0,0 +1,126 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
time "time"
)
// Consumer is an autogenerated mock type for the Consumer type
type Consumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnCurrentRankDetails provides a mock function with given fields: currentRank, finalizedRank, currentLeader
func (_m *Consumer[StateT, VoteT]) OnCurrentRankDetails(currentRank uint64, finalizedRank uint64, currentLeader models.Identity) {
_m.Called(currentRank, finalizedRank, currentLeader)
}
// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1
func (_m *Consumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) {
_m.Called(_a0, _a1)
}
// OnEventProcessed provides a mock function with no fields
func (_m *Consumer[StateT, VoteT]) OnEventProcessed() {
_m.Called()
}
// OnFinalizedState provides a mock function with given fields: _a0
func (_m *Consumer[StateT, VoteT]) OnFinalizedState(_a0 *models.State[StateT]) {
_m.Called(_a0)
}
// OnInvalidStateDetected provides a mock function with given fields: err
func (_m *Consumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) {
_m.Called(err)
}
// OnLocalTimeout provides a mock function with given fields: currentRank
func (_m *Consumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) {
_m.Called(currentRank)
}
// OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime
func (_m *Consumer[StateT, VoteT]) OnOwnProposal(proposal *models.SignedProposal[StateT, VoteT], targetPublicationTime time.Time) {
_m.Called(proposal, targetPublicationTime)
}
// OnOwnTimeout provides a mock function with given fields: timeout
func (_m *Consumer[StateT, VoteT]) OnOwnTimeout(timeout *models.TimeoutState[VoteT]) {
_m.Called(timeout)
}
// OnOwnVote provides a mock function with given fields: vote, recipientID
func (_m *Consumer[StateT, VoteT]) OnOwnVote(vote *VoteT, recipientID models.Identity) {
_m.Called(vote, recipientID)
}
// OnPartialTimeoutCertificate provides a mock function with given fields: currentRank, partialTimeoutCertificate
func (_m *Consumer[StateT, VoteT]) OnPartialTimeoutCertificate(currentRank uint64, partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) {
_m.Called(currentRank, partialTimeoutCertificate)
}
// OnQuorumCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, qc
func (_m *Consumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(oldRank uint64, newRank uint64, qc models.QuorumCertificate) {
_m.Called(oldRank, newRank, qc)
}
// OnRankChange provides a mock function with given fields: oldRank, newRank
func (_m *Consumer[StateT, VoteT]) OnRankChange(oldRank uint64, newRank uint64) {
_m.Called(oldRank, newRank)
}
// OnReceiveProposal provides a mock function with given fields: currentRank, proposal
func (_m *Consumer[StateT, VoteT]) OnReceiveProposal(currentRank uint64, proposal *models.SignedProposal[StateT, VoteT]) {
_m.Called(currentRank, proposal)
}
// OnReceiveQuorumCertificate provides a mock function with given fields: currentRank, qc
func (_m *Consumer[StateT, VoteT]) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) {
_m.Called(currentRank, qc)
}
// OnReceiveTimeoutCertificate provides a mock function with given fields: currentRank, tc
func (_m *Consumer[StateT, VoteT]) OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) {
_m.Called(currentRank, tc)
}
// OnStart provides a mock function with given fields: currentRank
func (_m *Consumer[StateT, VoteT]) OnStart(currentRank uint64) {
_m.Called(currentRank)
}
// OnStartingTimeout provides a mock function with given fields: startTime, endTime
func (_m *Consumer[StateT, VoteT]) OnStartingTimeout(startTime time.Time, endTime time.Time) {
_m.Called(startTime, endTime)
}
// OnStateIncorporated provides a mock function with given fields: _a0
func (_m *Consumer[StateT, VoteT]) OnStateIncorporated(_a0 *models.State[StateT]) {
_m.Called(_a0)
}
// OnTimeoutCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, tc
func (_m *Consumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(oldRank uint64, newRank uint64, tc models.TimeoutCertificate) {
_m.Called(oldRank, newRank, tc)
}
// NewConsumer creates a new instance of Consumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *Consumer[StateT, VoteT] {
mock := &Consumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,249 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// DynamicCommittee is an autogenerated mock type for the DynamicCommittee type
type DynamicCommittee struct {
mock.Mock
}
// IdentitiesByRank provides a mock function with given fields: rank
func (_m *DynamicCommittee) IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for IdentitiesByRank")
}
var r0 []models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(uint64) ([]models.WeightedIdentity, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) []models.WeightedIdentity); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IdentitiesByState provides a mock function with given fields: stateID
func (_m *DynamicCommittee) IdentitiesByState(stateID models.Identity) ([]models.WeightedIdentity, error) {
ret := _m.Called(stateID)
if len(ret) == 0 {
panic("no return value specified for IdentitiesByState")
}
var r0 []models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(models.Identity) ([]models.WeightedIdentity, error)); ok {
return rf(stateID)
}
if rf, ok := ret.Get(0).(func(models.Identity) []models.WeightedIdentity); ok {
r0 = rf(stateID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(models.Identity) error); ok {
r1 = rf(stateID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IdentityByRank provides a mock function with given fields: rank, participantID
func (_m *DynamicCommittee) IdentityByRank(rank uint64, participantID models.Identity) (models.WeightedIdentity, error) {
ret := _m.Called(rank, participantID)
if len(ret) == 0 {
panic("no return value specified for IdentityByRank")
}
var r0 models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(uint64, models.Identity) (models.WeightedIdentity, error)); ok {
return rf(rank, participantID)
}
if rf, ok := ret.Get(0).(func(uint64, models.Identity) models.WeightedIdentity); ok {
r0 = rf(rank, participantID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(uint64, models.Identity) error); ok {
r1 = rf(rank, participantID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IdentityByState provides a mock function with given fields: stateID, participantID
func (_m *DynamicCommittee) IdentityByState(stateID models.Identity, participantID models.Identity) (models.WeightedIdentity, error) {
ret := _m.Called(stateID, participantID)
if len(ret) == 0 {
panic("no return value specified for IdentityByState")
}
var r0 models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) (models.WeightedIdentity, error)); ok {
return rf(stateID, participantID)
}
if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) models.WeightedIdentity); ok {
r0 = rf(stateID, participantID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(models.Identity, models.Identity) error); ok {
r1 = rf(stateID, participantID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LeaderForRank provides a mock function with given fields: rank
func (_m *DynamicCommittee) LeaderForRank(rank uint64) (models.Identity, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for LeaderForRank")
}
var r0 models.Identity
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (models.Identity, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) models.Identity); ok {
r0 = rf(rank)
} else {
r0 = ret.Get(0).(models.Identity)
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// QuorumThresholdForRank provides a mock function with given fields: rank
func (_m *DynamicCommittee) QuorumThresholdForRank(rank uint64) (uint64, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for QuorumThresholdForRank")
}
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) uint64); ok {
r0 = rf(rank)
} else {
r0 = ret.Get(0).(uint64)
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Self provides a mock function with no fields
func (_m *DynamicCommittee) Self() models.Identity {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Self")
}
var r0 models.Identity
if rf, ok := ret.Get(0).(func() models.Identity); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(models.Identity)
}
return r0
}
// TimeoutThresholdForRank provides a mock function with given fields: rank
func (_m *DynamicCommittee) TimeoutThresholdForRank(rank uint64) (uint64, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for TimeoutThresholdForRank")
}
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) uint64); ok {
r0 = rf(rank)
} else {
r0 = ret.Get(0).(uint64)
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewDynamicCommittee creates a new instance of DynamicCommittee. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewDynamicCommittee(t interface {
mock.TestingT
Cleanup(func())
}) *DynamicCommittee {
mock := &DynamicCommittee{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,162 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
time "time"
)
// EventHandler is an autogenerated mock type for the EventHandler type
type EventHandler[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnLocalTimeout provides a mock function with no fields
func (_m *EventHandler[StateT, VoteT]) OnLocalTimeout() error {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for OnLocalTimeout")
}
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// OnPartialTimeoutCertificateCreated provides a mock function with given fields: partialTimeoutCertificate
func (_m *EventHandler[StateT, VoteT]) OnPartialTimeoutCertificateCreated(partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) error {
ret := _m.Called(partialTimeoutCertificate)
if len(ret) == 0 {
panic("no return value specified for OnPartialTimeoutCertificateCreated")
}
var r0 error
if rf, ok := ret.Get(0).(func(*consensus.PartialTimeoutCertificateCreated) error); ok {
r0 = rf(partialTimeoutCertificate)
} else {
r0 = ret.Error(0)
}
return r0
}
// OnReceiveProposal provides a mock function with given fields: proposal
func (_m *EventHandler[StateT, VoteT]) OnReceiveProposal(proposal *models.SignedProposal[StateT, VoteT]) error {
ret := _m.Called(proposal)
if len(ret) == 0 {
panic("no return value specified for OnReceiveProposal")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok {
r0 = rf(proposal)
} else {
r0 = ret.Error(0)
}
return r0
}
// OnReceiveQuorumCertificate provides a mock function with given fields: quorumCertificate
func (_m *EventHandler[StateT, VoteT]) OnReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) error {
ret := _m.Called(quorumCertificate)
if len(ret) == 0 {
panic("no return value specified for OnReceiveQuorumCertificate")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok {
r0 = rf(quorumCertificate)
} else {
r0 = ret.Error(0)
}
return r0
}
// OnReceiveTimeoutCertificate provides a mock function with given fields: timeoutCertificate
func (_m *EventHandler[StateT, VoteT]) OnReceiveTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error {
ret := _m.Called(timeoutCertificate)
if len(ret) == 0 {
panic("no return value specified for OnReceiveTimeoutCertificate")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok {
r0 = rf(timeoutCertificate)
} else {
r0 = ret.Error(0)
}
return r0
}
// Start provides a mock function with given fields: ctx
func (_m *EventHandler[StateT, VoteT]) Start(ctx context.Context) error {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// TimeoutChannel provides a mock function with no fields
func (_m *EventHandler[StateT, VoteT]) TimeoutChannel() <-chan time.Time {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for TimeoutChannel")
}
var r0 <-chan time.Time
if rf, ok := ret.Get(0).(func() <-chan time.Time); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan time.Time)
}
}
return r0
}
// NewEventHandler creates a new instance of EventHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewEventHandler[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *EventHandler[StateT, VoteT] {
mock := &EventHandler[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,67 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// EventLoop is an autogenerated mock type for the EventLoop type
type EventLoop[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate
func (_m *EventLoop[StateT, VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) {
_m.Called(certificate)
}
// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate
func (_m *EventLoop[StateT, VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) {
_m.Called(certificate)
}
// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC
func (_m *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) {
_m.Called(rank, newestQC, lastRankTC)
}
// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0
func (_m *EventLoop[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) {
_m.Called(_a0)
}
// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate
func (_m *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) {
_m.Called(certificate)
}
// OnTimeoutProcessed provides a mock function with given fields: timeout
func (_m *EventLoop[StateT, VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) {
_m.Called(timeout)
}
// OnVoteProcessed provides a mock function with given fields: vote
func (_m *EventLoop[StateT, VoteT]) OnVoteProcessed(vote *VoteT) {
_m.Called(vote)
}
// SubmitProposal provides a mock function with given fields: proposal
func (_m *EventLoop[StateT, VoteT]) SubmitProposal(proposal *models.SignedProposal[StateT, VoteT]) {
_m.Called(proposal)
}
// NewEventLoop creates a new instance of EventLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewEventLoop[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *EventLoop[StateT, VoteT] {
mock := &EventLoop[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,37 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// FinalizationConsumer is an autogenerated mock type for the FinalizationConsumer type
type FinalizationConsumer[StateT models.Unique] struct {
mock.Mock
}
// OnFinalizedState provides a mock function with given fields: _a0
func (_m *FinalizationConsumer[StateT]) OnFinalizedState(_a0 *models.State[StateT]) {
_m.Called(_a0)
}
// OnStateIncorporated provides a mock function with given fields: _a0
func (_m *FinalizationConsumer[StateT]) OnStateIncorporated(_a0 *models.State[StateT]) {
_m.Called(_a0)
}
// NewFinalizationConsumer creates a new instance of FinalizationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFinalizationConsumer[StateT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *FinalizationConsumer[StateT] {
mock := &FinalizationConsumer[StateT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,45 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Finalizer is an autogenerated mock type for the Finalizer type
type Finalizer struct {
mock.Mock
}
// MakeFinal provides a mock function with given fields: stateID
func (_m *Finalizer) MakeFinal(stateID models.Identity) error {
ret := _m.Called(stateID)
if len(ret) == 0 {
panic("no return value specified for MakeFinal")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.Identity) error); ok {
r0 = rf(stateID)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewFinalizer creates a new instance of Finalizer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFinalizer(t interface {
mock.TestingT
Cleanup(func())
}) *Finalizer {
mock := &Finalizer{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,47 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// FollowerConsumer is an autogenerated mock type for the FollowerConsumer type
type FollowerConsumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1
func (_m *FollowerConsumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) {
_m.Called(_a0, _a1)
}
// OnFinalizedState provides a mock function with given fields: _a0
func (_m *FollowerConsumer[StateT, VoteT]) OnFinalizedState(_a0 *models.State[StateT]) {
_m.Called(_a0)
}
// OnInvalidStateDetected provides a mock function with given fields: err
func (_m *FollowerConsumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) {
_m.Called(err)
}
// OnStateIncorporated provides a mock function with given fields: _a0
func (_m *FollowerConsumer[StateT, VoteT]) OnStateIncorporated(_a0 *models.State[StateT]) {
_m.Called(_a0)
}
// NewFollowerConsumer creates a new instance of FollowerConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFollowerConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *FollowerConsumer[StateT, VoteT] {
mock := &FollowerConsumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,32 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// FollowerLoop is an autogenerated mock type for the FollowerLoop type
type FollowerLoop[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// AddCertifiedState provides a mock function with given fields: certifiedState
func (_m *FollowerLoop[StateT, VoteT]) AddCertifiedState(certifiedState *models.CertifiedState[StateT]) {
_m.Called(certifiedState)
}
// NewFollowerLoop creates a new instance of FollowerLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFollowerLoop[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *FollowerLoop[StateT, VoteT] {
mock := &FollowerLoop[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

consensus/mocks/forks.go
@@ -0,0 +1,183 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Forks is an autogenerated mock type for the Forks type
type Forks[StateT models.Unique] struct {
mock.Mock
}
// AddCertifiedState provides a mock function with given fields: certifiedState
func (_m *Forks[StateT]) AddCertifiedState(certifiedState *models.CertifiedState[StateT]) error {
ret := _m.Called(certifiedState)
if len(ret) == 0 {
panic("no return value specified for AddCertifiedState")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.CertifiedState[StateT]) error); ok {
r0 = rf(certifiedState)
} else {
r0 = ret.Error(0)
}
return r0
}
// AddValidatedState provides a mock function with given fields: proposal
func (_m *Forks[StateT]) AddValidatedState(proposal *models.State[StateT]) error {
ret := _m.Called(proposal)
if len(ret) == 0 {
panic("no return value specified for AddValidatedState")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.State[StateT]) error); ok {
r0 = rf(proposal)
} else {
r0 = ret.Error(0)
}
return r0
}
// FinalityProof provides a mock function with no fields
func (_m *Forks[StateT]) FinalityProof() (*consensus.FinalityProof[StateT], bool) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for FinalityProof")
}
var r0 *consensus.FinalityProof[StateT]
var r1 bool
if rf, ok := ret.Get(0).(func() (*consensus.FinalityProof[StateT], bool)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() *consensus.FinalityProof[StateT]); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*consensus.FinalityProof[StateT])
}
}
if rf, ok := ret.Get(1).(func() bool); ok {
r1 = rf()
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// FinalizedRank provides a mock function with no fields
func (_m *Forks[StateT]) FinalizedRank() uint64 {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for FinalizedRank")
}
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
return r0
}
// FinalizedState provides a mock function with no fields
func (_m *Forks[StateT]) FinalizedState() *models.State[StateT] {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for FinalizedState")
}
var r0 *models.State[StateT]
if rf, ok := ret.Get(0).(func() *models.State[StateT]); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.State[StateT])
}
}
return r0
}
// GetState provides a mock function with given fields: stateID
func (_m *Forks[StateT]) GetState(stateID models.Identity) (*models.State[StateT], bool) {
ret := _m.Called(stateID)
if len(ret) == 0 {
panic("no return value specified for GetState")
}
var r0 *models.State[StateT]
var r1 bool
if rf, ok := ret.Get(0).(func(models.Identity) (*models.State[StateT], bool)); ok {
return rf(stateID)
}
if rf, ok := ret.Get(0).(func(models.Identity) *models.State[StateT]); ok {
r0 = rf(stateID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.State[StateT])
}
}
if rf, ok := ret.Get(1).(func(models.Identity) bool); ok {
r1 = rf(stateID)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// GetStatesForRank provides a mock function with given fields: rank
func (_m *Forks[StateT]) GetStatesForRank(rank uint64) []*models.State[StateT] {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for GetStatesForRank")
}
var r0 []*models.State[StateT]
if rf, ok := ret.Get(0).(func(uint64) []*models.State[StateT]); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*models.State[StateT])
}
}
return r0
}
// NewForks creates a new instance of Forks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewForks[StateT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *Forks[StateT] {
mock := &Forks[StateT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,89 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// LeaderProvider is an autogenerated mock type for the LeaderProvider type
type LeaderProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique] struct {
mock.Mock
}
// GetNextLeaders provides a mock function with given fields: ctx, prior
func (_m *LeaderProvider[StateT, PeerIDT, CollectedT]) GetNextLeaders(ctx context.Context, prior *StateT) ([]PeerIDT, error) {
ret := _m.Called(ctx, prior)
if len(ret) == 0 {
panic("no return value specified for GetNextLeaders")
}
var r0 []PeerIDT
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *StateT) ([]PeerIDT, error)); ok {
return rf(ctx, prior)
}
if rf, ok := ret.Get(0).(func(context.Context, *StateT) []PeerIDT); ok {
r0 = rf(ctx, prior)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]PeerIDT)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *StateT) error); ok {
r1 = rf(ctx, prior)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ProveNextState provides a mock function with given fields: ctx, rank, filter, priorState
func (_m *LeaderProvider[StateT, PeerIDT, CollectedT]) ProveNextState(ctx context.Context, rank uint64, filter []byte, priorState models.Identity) (*StateT, error) {
ret := _m.Called(ctx, rank, filter, priorState)
if len(ret) == 0 {
panic("no return value specified for ProveNextState")
}
var r0 *StateT
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, models.Identity) (*StateT, error)); ok {
return rf(ctx, rank, filter, priorState)
}
if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, models.Identity) *StateT); ok {
r0 = rf(ctx, rank, filter, priorState)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*StateT)
}
}
if rf, ok := ret.Get(1).(func(context.Context, uint64, []byte, models.Identity) error); ok {
r1 = rf(ctx, rank, filter, priorState)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewLeaderProvider creates a new instance of LeaderProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewLeaderProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *LeaderProvider[StateT, PeerIDT, CollectedT] {
mock := &LeaderProvider[StateT, PeerIDT, CollectedT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,77 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// LivenessProvider is an autogenerated mock type for the LivenessProvider type
type LivenessProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique] struct {
mock.Mock
}
// Collect provides a mock function with given fields: ctx
func (_m *LivenessProvider[StateT, PeerIDT, CollectedT]) Collect(ctx context.Context) (CollectedT, error) {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Collect")
}
var r0 CollectedT
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (CollectedT, error)); ok {
return rf(ctx)
}
if rf, ok := ret.Get(0).(func(context.Context) CollectedT); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(CollectedT)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SendLiveness provides a mock function with given fields: ctx, prior, collected
func (_m *LivenessProvider[StateT, PeerIDT, CollectedT]) SendLiveness(ctx context.Context, prior *StateT, collected CollectedT) error {
ret := _m.Called(ctx, prior, collected)
if len(ret) == 0 {
panic("no return value specified for SendLiveness")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *StateT, CollectedT) error); ok {
r0 = rf(ctx, prior, collected)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewLivenessProvider creates a new instance of LivenessProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewLivenessProvider[StateT models.Unique, PeerIDT models.Unique, CollectedT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *LivenessProvider[StateT, PeerIDT, CollectedT] {
mock := &LivenessProvider[StateT, PeerIDT, CollectedT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

@@ -0,0 +1,205 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
time "time"
)
// Pacemaker is an autogenerated mock type for the Pacemaker type
type Pacemaker struct {
mock.Mock
}
// CurrentRank provides a mock function with no fields
func (_m *Pacemaker) CurrentRank() uint64 {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for CurrentRank")
}
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
return r0
}
// LatestQuorumCertificate provides a mock function with no fields
func (_m *Pacemaker) LatestQuorumCertificate() models.QuorumCertificate {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for LatestQuorumCertificate")
}
var r0 models.QuorumCertificate
if rf, ok := ret.Get(0).(func() models.QuorumCertificate); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.QuorumCertificate)
}
}
return r0
}
// PriorRankTimeoutCertificate provides a mock function with no fields
func (_m *Pacemaker) PriorRankTimeoutCertificate() models.TimeoutCertificate {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for PriorRankTimeoutCertificate")
}
var r0 models.TimeoutCertificate
if rf, ok := ret.Get(0).(func() models.TimeoutCertificate); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.TimeoutCertificate)
}
}
return r0
}
// ReceiveQuorumCertificate provides a mock function with given fields: quorumCertificate
func (_m *Pacemaker) ReceiveQuorumCertificate(quorumCertificate models.QuorumCertificate) (*models.NextRank, error) {
ret := _m.Called(quorumCertificate)
if len(ret) == 0 {
panic("no return value specified for ReceiveQuorumCertificate")
}
var r0 *models.NextRank
var r1 error
if rf, ok := ret.Get(0).(func(models.QuorumCertificate) (*models.NextRank, error)); ok {
return rf(quorumCertificate)
}
if rf, ok := ret.Get(0).(func(models.QuorumCertificate) *models.NextRank); ok {
r0 = rf(quorumCertificate)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.NextRank)
}
}
if rf, ok := ret.Get(1).(func(models.QuorumCertificate) error); ok {
r1 = rf(quorumCertificate)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ReceiveTimeoutCertificate provides a mock function with given fields: timeoutCertificate
func (_m *Pacemaker) ReceiveTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) (*models.NextRank, error) {
ret := _m.Called(timeoutCertificate)
if len(ret) == 0 {
panic("no return value specified for ReceiveTimeoutCertificate")
}
var r0 *models.NextRank
var r1 error
if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) (*models.NextRank, error)); ok {
return rf(timeoutCertificate)
}
if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) *models.NextRank); ok {
r0 = rf(timeoutCertificate)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.NextRank)
}
}
if rf, ok := ret.Get(1).(func(models.TimeoutCertificate) error); ok {
r1 = rf(timeoutCertificate)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Start provides a mock function with given fields: ctx
func (_m *Pacemaker) Start(ctx context.Context) error {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// TargetPublicationTime provides a mock function with given fields: proposalRank, timeRankEntered, parentStateId
func (_m *Pacemaker) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time {
ret := _m.Called(proposalRank, timeRankEntered, parentStateId)
if len(ret) == 0 {
panic("no return value specified for TargetPublicationTime")
}
var r0 time.Time
if rf, ok := ret.Get(0).(func(uint64, time.Time, models.Identity) time.Time); ok {
r0 = rf(proposalRank, timeRankEntered, parentStateId)
} else {
r0 = ret.Get(0).(time.Time)
}
return r0
}
// TimeoutCh provides a mock function with no fields
func (_m *Pacemaker) TimeoutCh() <-chan time.Time {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for TimeoutCh")
}
var r0 <-chan time.Time
if rf, ok := ret.Get(0).(func() <-chan time.Time); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan time.Time)
}
}
return r0
}
// NewPacemaker creates a new instance of Pacemaker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPacemaker(t interface {
mock.TestingT
Cleanup(func())
}) *Pacemaker {
mock := &Pacemaker{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
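
A hypothetical usage sketch, not part of this changeset: wiring the generated Pacemaker mock with testify. The test name and values are invented; the one real constraint, visible in the generated bodies above, is that `Return` values must carry the exact types the mock type-asserts (`uint64`, `<-chan time.Time`), or the assertion panics at call time.

```go
package mocks_test

import (
	"testing"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
)

func TestPacemakerMockSketch(t *testing.T) {
	pm := mocks.NewPacemaker(t) // cleanup + AssertExpectations are registered for us

	// The generated body asserts ret.Get(0).(uint64), so an untyped 7
	// (an int) would panic; pass uint64 explicitly.
	pm.On("CurrentRank").Return(uint64(7))

	// Channels need the same care: convert to the receive-only type
	// the mock asserts, <-chan time.Time.
	timeouts := make(chan time.Time)
	pm.On("TimeoutCh").Return((<-chan time.Time)(timeouts))

	if pm.CurrentRank() != 7 {
		t.Fatal("unexpected rank")
	}
	_ = pm.TimeoutCh()
}
```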

consensus/mocks/packer.go (new file, 98 lines)

@@ -0,0 +1,98 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Packer is an autogenerated mock type for the Packer type
type Packer struct {
mock.Mock
}
// Pack provides a mock function with given fields: rank, sig
func (_m *Packer) Pack(rank uint64, sig *consensus.StateSignatureData) ([]byte, []byte, error) {
ret := _m.Called(rank, sig)
if len(ret) == 0 {
panic("no return value specified for Pack")
}
var r0 []byte
var r1 []byte
var r2 error
if rf, ok := ret.Get(0).(func(uint64, *consensus.StateSignatureData) ([]byte, []byte, error)); ok {
return rf(rank, sig)
}
if rf, ok := ret.Get(0).(func(uint64, *consensus.StateSignatureData) []byte); ok {
r0 = rf(rank, sig)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
if rf, ok := ret.Get(1).(func(uint64, *consensus.StateSignatureData) []byte); ok {
r1 = rf(rank, sig)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).([]byte)
}
}
if rf, ok := ret.Get(2).(func(uint64, *consensus.StateSignatureData) error); ok {
r2 = rf(rank, sig)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// Unpack provides a mock function with given fields: signerIdentities, sigData
func (_m *Packer) Unpack(signerIdentities []models.WeightedIdentity, sigData []byte) (*consensus.StateSignatureData, error) {
ret := _m.Called(signerIdentities, sigData)
if len(ret) == 0 {
panic("no return value specified for Unpack")
}
var r0 *consensus.StateSignatureData
var r1 error
if rf, ok := ret.Get(0).(func([]models.WeightedIdentity, []byte) (*consensus.StateSignatureData, error)); ok {
return rf(signerIdentities, sigData)
}
if rf, ok := ret.Get(0).(func([]models.WeightedIdentity, []byte) *consensus.StateSignatureData); ok {
r0 = rf(signerIdentities, sigData)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*consensus.StateSignatureData)
}
}
if rf, ok := ret.Get(1).(func([]models.WeightedIdentity, []byte) error); ok {
r1 = rf(signerIdentities, sigData)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewPacker creates a new instance of Packer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPacker(t interface {
mock.TestingT
Cleanup(func())
}) *Packer {
mock := &Packer{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
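
Another hypothetical sketch, not from the commit: `Pack` has three returns, and `mock.Anything` can stand in for the `*consensus.StateSignatureData` argument when a test does not care about its contents.

```go
package mocks_test

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
)

func TestPackerMockSketch(t *testing.T) {
	p := mocks.NewPacker(t)

	// Three Return values mirror Pack's ([]byte, []byte, error).
	p.On("Pack", uint64(3), mock.Anything).
		Return([]byte("signer-indices"), []byte("agg-sig"), nil)

	signers, sig, err := p.Pack(3, nil)
	if err != nil || !bytes.Equal(signers, []byte("signer-indices")) || len(sig) == 0 {
		t.Fatal("unexpected Pack result")
	}
}
```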


@@ -0,0 +1,91 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
time "time"
)
// ParticipantConsumer is an autogenerated mock type for the ParticipantConsumer type
type ParticipantConsumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnCurrentRankDetails provides a mock function with given fields: currentRank, finalizedRank, currentLeader
func (_m *ParticipantConsumer[StateT, VoteT]) OnCurrentRankDetails(currentRank uint64, finalizedRank uint64, currentLeader models.Identity) {
_m.Called(currentRank, finalizedRank, currentLeader)
}
// OnEventProcessed provides a mock function with no fields
func (_m *ParticipantConsumer[StateT, VoteT]) OnEventProcessed() {
_m.Called()
}
// OnLocalTimeout provides a mock function with given fields: currentRank
func (_m *ParticipantConsumer[StateT, VoteT]) OnLocalTimeout(currentRank uint64) {
_m.Called(currentRank)
}
// OnPartialTimeoutCertificate provides a mock function with given fields: currentRank, partialTimeoutCertificate
func (_m *ParticipantConsumer[StateT, VoteT]) OnPartialTimeoutCertificate(currentRank uint64, partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated) {
_m.Called(currentRank, partialTimeoutCertificate)
}
// OnQuorumCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, qc
func (_m *ParticipantConsumer[StateT, VoteT]) OnQuorumCertificateTriggeredRankChange(oldRank uint64, newRank uint64, qc models.QuorumCertificate) {
_m.Called(oldRank, newRank, qc)
}
// OnRankChange provides a mock function with given fields: oldRank, newRank
func (_m *ParticipantConsumer[StateT, VoteT]) OnRankChange(oldRank uint64, newRank uint64) {
_m.Called(oldRank, newRank)
}
// OnReceiveProposal provides a mock function with given fields: currentRank, proposal
func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveProposal(currentRank uint64, proposal *models.SignedProposal[StateT, VoteT]) {
_m.Called(currentRank, proposal)
}
// OnReceiveQuorumCertificate provides a mock function with given fields: currentRank, qc
func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) {
_m.Called(currentRank, qc)
}
// OnReceiveTimeoutCertificate provides a mock function with given fields: currentRank, tc
func (_m *ParticipantConsumer[StateT, VoteT]) OnReceiveTimeoutCertificate(currentRank uint64, tc models.TimeoutCertificate) {
_m.Called(currentRank, tc)
}
// OnStart provides a mock function with given fields: currentRank
func (_m *ParticipantConsumer[StateT, VoteT]) OnStart(currentRank uint64) {
_m.Called(currentRank)
}
// OnStartingTimeout provides a mock function with given fields: startTime, endTime
func (_m *ParticipantConsumer[StateT, VoteT]) OnStartingTimeout(startTime time.Time, endTime time.Time) {
_m.Called(startTime, endTime)
}
// OnTimeoutCertificateTriggeredRankChange provides a mock function with given fields: oldRank, newRank, tc
func (_m *ParticipantConsumer[StateT, VoteT]) OnTimeoutCertificateTriggeredRankChange(oldRank uint64, newRank uint64, tc models.TimeoutCertificate) {
_m.Called(oldRank, newRank, tc)
}
// NewParticipantConsumer creates a new instance of ParticipantConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewParticipantConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *ParticipantConsumer[StateT, VoteT] {
mock := &ParticipantConsumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
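
A hypothetical sketch for the generic mocks: since the definition of `models.Unique` is outside this diff, the compile-safe way to illustrate the call shape is to stay generic and let the caller supply concrete types.

```go
package mocks_test

import (
	"testing"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// wireConsumer sets expectations on the void notification methods;
// there is no Return because these interface methods return nothing.
func wireConsumer[StateT models.Unique, VoteT models.Unique](
	t *testing.T,
) *mocks.ParticipantConsumer[StateT, VoteT] {
	c := mocks.NewParticipantConsumer[StateT, VoteT](t)
	c.On("OnStart", uint64(1)).Once()
	c.On("OnRankChange", uint64(1), uint64(2)).Once()
	return c
}
```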


@@ -0,0 +1,47 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
time "time"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// ProposalDurationProvider is an autogenerated mock type for the ProposalDurationProvider type
type ProposalDurationProvider struct {
mock.Mock
}
// TargetPublicationTime provides a mock function with given fields: proposalRank, timeRankEntered, parentStateId
func (_m *ProposalDurationProvider) TargetPublicationTime(proposalRank uint64, timeRankEntered time.Time, parentStateId models.Identity) time.Time {
ret := _m.Called(proposalRank, timeRankEntered, parentStateId)
if len(ret) == 0 {
panic("no return value specified for TargetPublicationTime")
}
var r0 time.Time
if rf, ok := ret.Get(0).(func(uint64, time.Time, models.Identity) time.Time); ok {
r0 = rf(proposalRank, timeRankEntered, parentStateId)
} else {
r0 = ret.Get(0).(time.Time)
}
return r0
}
// NewProposalDurationProvider creates a new instance of ProposalDurationProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewProposalDurationProvider(t interface {
mock.TestingT
Cleanup(func())
}) *ProposalDurationProvider {
mock := &ProposalDurationProvider{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
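
One more hypothetical wiring, exercising the single method: testify matches arguments by deep equality, so the same `time.Time` and zero-valued `models.Identity` passed to `On` must be passed back in at call time.

```go
package mocks_test

import (
	"testing"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestProposalDurationProviderMockSketch(t *testing.T) {
	pdp := mocks.NewProposalDurationProvider(t)

	entered := time.Now()
	var parent models.Identity // zero value is enough for a sketch
	pdp.On("TargetPublicationTime", uint64(8), entered, parent).
		Return(entered.Add(500 * time.Millisecond))

	if got := pdp.TargetPublicationTime(8, entered, parent); !got.After(entered) {
		t.Fatal("expected a later publication time")
	}
}
```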


@@ -0,0 +1,37 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// ProposalViolationConsumer is an autogenerated mock type for the ProposalViolationConsumer type
type ProposalViolationConsumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnDoubleProposeDetected provides a mock function with given fields: _a0, _a1
func (_m *ProposalViolationConsumer[StateT, VoteT]) OnDoubleProposeDetected(_a0 *models.State[StateT], _a1 *models.State[StateT]) {
_m.Called(_a0, _a1)
}
// OnInvalidStateDetected provides a mock function with given fields: err
func (_m *ProposalViolationConsumer[StateT, VoteT]) OnInvalidStateDetected(err *models.InvalidProposalError[StateT, VoteT]) {
_m.Called(err)
}
// NewProposalViolationConsumer creates a new instance of ProposalViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewProposalViolationConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *ProposalViolationConsumer[StateT, VoteT] {
mock := &ProposalViolationConsumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,87 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// ReadOnlyConsensusStore is an autogenerated mock type for the ReadOnlyConsensusStore type
type ReadOnlyConsensusStore[VoteT models.Unique] struct {
mock.Mock
}
// GetConsensusState provides a mock function with no fields
func (_m *ReadOnlyConsensusStore[VoteT]) GetConsensusState() (*models.ConsensusState[VoteT], error) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for GetConsensusState")
}
var r0 *models.ConsensusState[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func() (*models.ConsensusState[VoteT], error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() *models.ConsensusState[VoteT]); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.ConsensusState[VoteT])
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetLivenessState provides a mock function with no fields
func (_m *ReadOnlyConsensusStore[VoteT]) GetLivenessState() (*models.LivenessState, error) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for GetLivenessState")
}
var r0 *models.LivenessState
var r1 error
if rf, ok := ret.Get(0).(func() (*models.LivenessState, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() *models.LivenessState); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.LivenessState)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewReadOnlyConsensusStore creates a new instance of ReadOnlyConsensusStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewReadOnlyConsensusStore[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *ReadOnlyConsensusStore[VoteT] {
mock := &ReadOnlyConsensusStore[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
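
Hypothetical again; the generic-helper pattern keeps a store fixture compilable without knowing the concrete vote type. The typed nil `*models.LivenessState` is deliberate: the generated body's `ret.Get(0) != nil` guard handles it cleanly.

```go
package mocks_test

import (
	"testing"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// storeWith returns a read-only store mock that hands back the given
// consensus state and no liveness state.
func storeWith[VoteT models.Unique](
	t *testing.T, cs *models.ConsensusState[VoteT],
) *mocks.ReadOnlyConsensusStore[VoteT] {
	s := mocks.NewReadOnlyConsensusStore[VoteT](t)
	s.On("GetConsensusState").Return(cs, nil)
	s.On("GetLivenessState").Return((*models.LivenessState)(nil), nil)
	return s
}
```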

consensus/mocks/replicas.go (new file, 189 lines)

@@ -0,0 +1,189 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Replicas is an autogenerated mock type for the Replicas type
type Replicas struct {
mock.Mock
}
// IdentitiesByRank provides a mock function with given fields: rank
func (_m *Replicas) IdentitiesByRank(rank uint64) ([]models.WeightedIdentity, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for IdentitiesByRank")
}
var r0 []models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(uint64) ([]models.WeightedIdentity, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) []models.WeightedIdentity); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IdentityByRank provides a mock function with given fields: rank, participantID
func (_m *Replicas) IdentityByRank(rank uint64, participantID models.Identity) (models.WeightedIdentity, error) {
ret := _m.Called(rank, participantID)
if len(ret) == 0 {
panic("no return value specified for IdentityByRank")
}
var r0 models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(uint64, models.Identity) (models.WeightedIdentity, error)); ok {
return rf(rank, participantID)
}
if rf, ok := ret.Get(0).(func(uint64, models.Identity) models.WeightedIdentity); ok {
r0 = rf(rank, participantID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(uint64, models.Identity) error); ok {
r1 = rf(rank, participantID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LeaderForRank provides a mock function with given fields: rank
func (_m *Replicas) LeaderForRank(rank uint64) (models.Identity, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for LeaderForRank")
}
var r0 models.Identity
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (models.Identity, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) models.Identity); ok {
r0 = rf(rank)
} else {
r0 = ret.Get(0).(models.Identity)
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// QuorumThresholdForRank provides a mock function with given fields: rank
func (_m *Replicas) QuorumThresholdForRank(rank uint64) (uint64, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for QuorumThresholdForRank")
}
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) uint64); ok {
r0 = rf(rank)
} else {
r0 = ret.Get(0).(uint64)
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Self provides a mock function with no fields
func (_m *Replicas) Self() models.Identity {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Self")
}
var r0 models.Identity
if rf, ok := ret.Get(0).(func() models.Identity); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(models.Identity)
}
return r0
}
// TimeoutThresholdForRank provides a mock function with given fields: rank
func (_m *Replicas) TimeoutThresholdForRank(rank uint64) (uint64, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for TimeoutThresholdForRank")
}
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) uint64); ok {
r0 = rf(rank)
} else {
r0 = ret.Get(0).(uint64)
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewReplicas creates a new instance of Replicas. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewReplicas(t interface {
mock.TestingT
Cleanup(func())
}) *Replicas {
mock := &Replicas{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
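
A hypothetical committee fixture. `LeaderForRank`'s generated body type-asserts its first return without a nil guard, which suggests `models.Identity` is a non-nilable type; a zero value keeps the sketch safe.

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestReplicasMockSketch(t *testing.T) {
	r := mocks.NewReplicas(t)

	var leader models.Identity // zero value stands in for a real ID
	r.On("LeaderForRank", uint64(10)).Return(leader, nil)
	r.On("QuorumThresholdForRank", mock.Anything).Return(uint64(67), nil)

	if _, err := r.LeaderForRank(10); err != nil {
		t.Fatal(err)
	}
	if q, _ := r.QuorumThresholdForRank(10); q != 67 {
		t.Fatal("unexpected quorum threshold")
	}
}
```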


@@ -0,0 +1,117 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// SafetyRules is an autogenerated mock type for the SafetyRules type
type SafetyRules[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// ProduceTimeout provides a mock function with given fields: curRank, newestQC, lastRankTC
func (_m *SafetyRules[StateT, VoteT]) ProduceTimeout(curRank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) {
ret := _m.Called(curRank, newestQC, lastRankTC)
if len(ret) == 0 {
panic("no return value specified for ProduceTimeout")
}
var r0 *models.TimeoutState[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok {
return rf(curRank, newestQC, lastRankTC)
}
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok {
r0 = rf(curRank, newestQC, lastRankTC)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.TimeoutState[VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok {
r1 = rf(curRank, newestQC, lastRankTC)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ProduceVote provides a mock function with given fields: proposal, curRank
func (_m *SafetyRules[StateT, VoteT]) ProduceVote(proposal *models.SignedProposal[StateT, VoteT], curRank uint64) (*VoteT, error) {
ret := _m.Called(proposal, curRank)
if len(ret) == 0 {
panic("no return value specified for ProduceVote")
}
var r0 *VoteT
var r1 error
if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT], uint64) (*VoteT, error)); ok {
return rf(proposal, curRank)
}
if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT], uint64) *VoteT); ok {
r0 = rf(proposal, curRank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*VoteT)
}
}
if rf, ok := ret.Get(1).(func(*models.SignedProposal[StateT, VoteT], uint64) error); ok {
r1 = rf(proposal, curRank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SignOwnProposal provides a mock function with given fields: unsignedProposal
func (_m *SafetyRules[StateT, VoteT]) SignOwnProposal(unsignedProposal *models.Proposal[StateT]) (*VoteT, error) {
ret := _m.Called(unsignedProposal)
if len(ret) == 0 {
panic("no return value specified for SignOwnProposal")
}
var r0 *VoteT
var r1 error
if rf, ok := ret.Get(0).(func(*models.Proposal[StateT]) (*VoteT, error)); ok {
return rf(unsignedProposal)
}
if rf, ok := ret.Get(0).(func(*models.Proposal[StateT]) *VoteT); ok {
r0 = rf(unsignedProposal)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*VoteT)
}
}
if rf, ok := ret.Get(1).(func(*models.Proposal[StateT]) error); ok {
r1 = rf(unsignedProposal)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewSafetyRules creates a new instance of SafetyRules. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewSafetyRules[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *SafetyRules[StateT, VoteT] {
mock := &SafetyRules[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
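
Hypothetical: the generic-helper shape again, this time with `mock.Anything` matching the proposal pointer so any proposal at the given rank yields the supplied vote.

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// rulesThatVote returns SafetyRules that produce the supplied vote for
// any proposal at the given rank.
func rulesThatVote[StateT models.Unique, VoteT models.Unique](
	t *testing.T, rank uint64, vote *VoteT,
) *mocks.SafetyRules[StateT, VoteT] {
	sr := mocks.NewSafetyRules[StateT, VoteT](t)
	sr.On("ProduceVote", mock.Anything, rank).Return(vote, nil)
	return sr
}
```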


@@ -0,0 +1,93 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// SignatureAggregator is an autogenerated mock type for the SignatureAggregator type
type SignatureAggregator struct {
mock.Mock
}
// Aggregate provides a mock function with given fields: publicKeys, signatures
func (_m *SignatureAggregator) Aggregate(publicKeys [][]byte, signatures [][]byte) (models.AggregatedSignature, error) {
ret := _m.Called(publicKeys, signatures)
if len(ret) == 0 {
panic("no return value specified for Aggregate")
}
var r0 models.AggregatedSignature
var r1 error
if rf, ok := ret.Get(0).(func([][]byte, [][]byte) (models.AggregatedSignature, error)); ok {
return rf(publicKeys, signatures)
}
if rf, ok := ret.Get(0).(func([][]byte, [][]byte) models.AggregatedSignature); ok {
r0 = rf(publicKeys, signatures)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.AggregatedSignature)
}
}
if rf, ok := ret.Get(1).(func([][]byte, [][]byte) error); ok {
r1 = rf(publicKeys, signatures)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// VerifySignatureMultiMessage provides a mock function with given fields: publicKeys, signature, messages, context
func (_m *SignatureAggregator) VerifySignatureMultiMessage(publicKeys [][]byte, signature []byte, messages [][]byte, context []byte) bool {
ret := _m.Called(publicKeys, signature, messages, context)
if len(ret) == 0 {
panic("no return value specified for VerifySignatureMultiMessage")
}
var r0 bool
if rf, ok := ret.Get(0).(func([][]byte, []byte, [][]byte, []byte) bool); ok {
r0 = rf(publicKeys, signature, messages, context)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// VerifySignatureRaw provides a mock function with given fields: publicKey, signature, message, context
func (_m *SignatureAggregator) VerifySignatureRaw(publicKey []byte, signature []byte, message []byte, context []byte) bool {
ret := _m.Called(publicKey, signature, message, context)
if len(ret) == 0 {
panic("no return value specified for VerifySignatureRaw")
}
var r0 bool
if rf, ok := ret.Get(0).(func([]byte, []byte, []byte, []byte) bool); ok {
r0 = rf(publicKey, signature, message, context)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// NewSignatureAggregator creates a new instance of SignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewSignatureAggregator(t interface {
mock.TestingT
Cleanup(func())
}) *SignatureAggregator {
mock := &SignatureAggregator{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
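
Hypothetical sketch; the `VerifySignature*` methods return bare bools, so stubbing verification to always succeed is a one-liner per method.

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestSignatureAggregatorMockSketch(t *testing.T) {
	agg := mocks.NewSignatureAggregator(t)

	var aggSig models.AggregatedSignature // zero value for the sketch
	agg.On("Aggregate", mock.Anything, mock.Anything).Return(aggSig, nil)
	agg.On("VerifySignatureRaw",
		mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true)

	if !agg.VerifySignatureRaw(nil, nil, nil, nil) {
		t.Fatal("stubbed verification should succeed")
	}
	if _, err := agg.Aggregate(nil, nil); err != nil {
		t.Fatal(err)
	}
}
```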

consensus/mocks/signer.go (new file, 87 lines)

@@ -0,0 +1,87 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Signer is an autogenerated mock type for the Signer type
type Signer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// CreateTimeout provides a mock function with given fields: curRank, newestQC, previousRankTimeoutCert
func (_m *Signer[StateT, VoteT]) CreateTimeout(curRank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) {
ret := _m.Called(curRank, newestQC, previousRankTimeoutCert)
if len(ret) == 0 {
panic("no return value specified for CreateTimeout")
}
var r0 *models.TimeoutState[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok {
return rf(curRank, newestQC, previousRankTimeoutCert)
}
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok {
r0 = rf(curRank, newestQC, previousRankTimeoutCert)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.TimeoutState[VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok {
r1 = rf(curRank, newestQC, previousRankTimeoutCert)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateVote provides a mock function with given fields: state
func (_m *Signer[StateT, VoteT]) CreateVote(state *models.State[StateT]) (*VoteT, error) {
ret := _m.Called(state)
if len(ret) == 0 {
panic("no return value specified for CreateVote")
}
var r0 *VoteT
var r1 error
if rf, ok := ret.Get(0).(func(*models.State[StateT]) (*VoteT, error)); ok {
return rf(state)
}
if rf, ok := ret.Get(0).(func(*models.State[StateT]) *VoteT); ok {
r0 = rf(state)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*VoteT)
}
}
if rf, ok := ret.Get(1).(func(*models.State[StateT]) error); ok {
r1 = rf(state)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewSigner creates a new instance of Signer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewSigner[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *Signer[StateT, VoteT] {
mock := &Signer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,57 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// StateProducer is an autogenerated mock type for the StateProducer type
type StateProducer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// MakeStateProposal provides a mock function with given fields: rank, qc, lastRankTC
func (_m *StateProducer[StateT, VoteT]) MakeStateProposal(rank uint64, qc models.QuorumCertificate, lastRankTC models.TimeoutCertificate) (*models.SignedProposal[StateT, VoteT], error) {
ret := _m.Called(rank, qc, lastRankTC)
if len(ret) == 0 {
panic("no return value specified for MakeStateProposal")
}
var r0 *models.SignedProposal[StateT, VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.SignedProposal[StateT, VoteT], error)); ok {
return rf(rank, qc, lastRankTC)
}
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.SignedProposal[StateT, VoteT]); ok {
r0 = rf(rank, qc, lastRankTC)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.SignedProposal[StateT, VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok {
r1 = rf(rank, qc, lastRankTC)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewStateProducer creates a new instance of StateProducer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewStateProducer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *StateProducer[StateT, VoteT] {
mock := &StateProducer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,57 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// StateSignerDecoder is an autogenerated mock type for the StateSignerDecoder type
type StateSignerDecoder[StateT models.Unique] struct {
mock.Mock
}
// DecodeSignerIDs provides a mock function with given fields: state
func (_m *StateSignerDecoder[StateT]) DecodeSignerIDs(state *models.State[StateT]) ([]models.WeightedIdentity, error) {
ret := _m.Called(state)
if len(ret) == 0 {
panic("no return value specified for DecodeSignerIDs")
}
var r0 []models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(*models.State[StateT]) ([]models.WeightedIdentity, error)); ok {
return rf(state)
}
if rf, ok := ret.Get(0).(func(*models.State[StateT]) []models.WeightedIdentity); ok {
r0 = rf(state)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(*models.State[StateT]) error); ok {
r1 = rf(state)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewStateSignerDecoder creates a new instance of StateSignerDecoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewStateSignerDecoder[StateT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *StateSignerDecoder[StateT] {
mock := &StateSignerDecoder[StateT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,61 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// SyncProvider is an autogenerated mock type for the SyncProvider type
type SyncProvider[StateT models.Unique] struct {
mock.Mock
}
// Synchronize provides a mock function with given fields: ctx, existing
func (_m *SyncProvider[StateT]) Synchronize(ctx context.Context, existing *StateT) (<-chan *StateT, <-chan error) {
ret := _m.Called(ctx, existing)
if len(ret) == 0 {
panic("no return value specified for Synchronize")
}
var r0 <-chan *StateT
var r1 <-chan error
if rf, ok := ret.Get(0).(func(context.Context, *StateT) (<-chan *StateT, <-chan error)); ok {
return rf(ctx, existing)
}
if rf, ok := ret.Get(0).(func(context.Context, *StateT) <-chan *StateT); ok {
r0 = rf(ctx, existing)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan *StateT)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *StateT) <-chan error); ok {
r1 = rf(ctx, existing)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(<-chan error)
}
}
return r0, r1
}
// NewSyncProvider creates a new instance of SyncProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewSyncProvider[StateT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *SyncProvider[StateT] {
mock := &SyncProvider[StateT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
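
Hypothetical; the channel returns need explicit conversion to the receive-only types the generated assertions expect, and a pre-loaded, closed channel makes the mock deliver one state and stop.

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// syncOnce yields a provider whose Synchronize emits one state, then
// closes; the error channel is left nil (it never signals).
func syncOnce[StateT models.Unique](
	t *testing.T, next *StateT,
) *mocks.SyncProvider[StateT] {
	sp := mocks.NewSyncProvider[StateT](t)

	states := make(chan *StateT, 1)
	states <- next
	close(states)

	sp.On("Synchronize", mock.Anything, mock.Anything).
		Return((<-chan *StateT)(states), (<-chan error)(nil))
	return sp
}
```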


@@ -0,0 +1,62 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutAggregationConsumer is an autogenerated mock type for the TimeoutAggregationConsumer type
type TimeoutAggregationConsumer[VoteT models.Unique] struct {
mock.Mock
}
// OnDoubleTimeoutDetected provides a mock function with given fields: _a0, _a1
func (_m *TimeoutAggregationConsumer[VoteT]) OnDoubleTimeoutDetected(_a0 *models.TimeoutState[VoteT], _a1 *models.TimeoutState[VoteT]) {
_m.Called(_a0, _a1)
}
// OnInvalidTimeoutDetected provides a mock function with given fields: err
func (_m *TimeoutAggregationConsumer[VoteT]) OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) {
_m.Called(err)
}
// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate
func (_m *TimeoutAggregationConsumer[VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) {
_m.Called(certificate)
}
// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate
func (_m *TimeoutAggregationConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) {
_m.Called(certificate)
}
// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC
func (_m *TimeoutAggregationConsumer[VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) {
_m.Called(rank, newestQC, lastRankTC)
}
// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate
func (_m *TimeoutAggregationConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) {
_m.Called(certificate)
}
// OnTimeoutProcessed provides a mock function with given fields: timeout
func (_m *TimeoutAggregationConsumer[VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) {
_m.Called(timeout)
}
// NewTimeoutAggregationConsumer creates a new instance of TimeoutAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutAggregationConsumer[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutAggregationConsumer[VoteT] {
mock := &TimeoutAggregationConsumer[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,37 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutAggregationViolationConsumer is an autogenerated mock type for the TimeoutAggregationViolationConsumer type
type TimeoutAggregationViolationConsumer[VoteT models.Unique] struct {
mock.Mock
}
// OnDoubleTimeoutDetected provides a mock function with given fields: _a0, _a1
func (_m *TimeoutAggregationViolationConsumer[VoteT]) OnDoubleTimeoutDetected(_a0 *models.TimeoutState[VoteT], _a1 *models.TimeoutState[VoteT]) {
_m.Called(_a0, _a1)
}
// OnInvalidTimeoutDetected provides a mock function with given fields: err
func (_m *TimeoutAggregationViolationConsumer[VoteT]) OnInvalidTimeoutDetected(err models.InvalidTimeoutError[VoteT]) {
_m.Called(err)
}
// NewTimeoutAggregationViolationConsumer creates a new instance of TimeoutAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutAggregationViolationConsumer[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutAggregationViolationConsumer[VoteT] {
mock := &TimeoutAggregationViolationConsumer[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,57 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutAggregator is an autogenerated mock type for the TimeoutAggregator type
type TimeoutAggregator[VoteT models.Unique] struct {
mock.Mock
}
// AddTimeout provides a mock function with given fields: timeoutState
func (_m *TimeoutAggregator[VoteT]) AddTimeout(timeoutState *models.TimeoutState[VoteT]) {
_m.Called(timeoutState)
}
// PruneUpToRank provides a mock function with given fields: lowestRetainedRank
func (_m *TimeoutAggregator[VoteT]) PruneUpToRank(lowestRetainedRank uint64) {
_m.Called(lowestRetainedRank)
}
// Start provides a mock function with given fields: ctx
func (_m *TimeoutAggregator[VoteT]) Start(ctx context.Context) error {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewTimeoutAggregator creates a new instance of TimeoutAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutAggregator[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutAggregator[VoteT] {
mock := &TimeoutAggregator[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
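
Hypothetical lifecycle wiring: `Start` takes a context and returns an error, while the other two methods are fire-and-forget.

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func startAggregator[VoteT models.Unique](t *testing.T) *mocks.TimeoutAggregator[VoteT] {
	agg := mocks.NewTimeoutAggregator[VoteT](t)
	agg.On("Start", mock.Anything).Return(nil)
	agg.On("PruneUpToRank", uint64(100)).Once()

	if err := agg.Start(context.Background()); err != nil {
		t.Fatal(err)
	}
	agg.PruneUpToRank(100)
	return agg
}
```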


@@ -0,0 +1,63 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutCollector is an autogenerated mock type for the TimeoutCollector type
type TimeoutCollector[VoteT models.Unique] struct {
mock.Mock
}
// AddTimeout provides a mock function with given fields: timeoutState
func (_m *TimeoutCollector[VoteT]) AddTimeout(timeoutState *models.TimeoutState[VoteT]) error {
ret := _m.Called(timeoutState)
if len(ret) == 0 {
panic("no return value specified for AddTimeout")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.TimeoutState[VoteT]) error); ok {
r0 = rf(timeoutState)
} else {
r0 = ret.Error(0)
}
return r0
}
// Rank provides a mock function with no fields
func (_m *TimeoutCollector[VoteT]) Rank() uint64 {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Rank")
}
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
return r0
}
// NewTimeoutCollector creates a new instance of TimeoutCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutCollector[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutCollector[VoteT] {
mock := &TimeoutCollector[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,52 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutCollectorConsumer is an autogenerated mock type for the TimeoutCollectorConsumer type
type TimeoutCollectorConsumer[VoteT models.Unique] struct {
mock.Mock
}
// OnNewQuorumCertificateDiscovered provides a mock function with given fields: certificate
func (_m *TimeoutCollectorConsumer[VoteT]) OnNewQuorumCertificateDiscovered(certificate models.QuorumCertificate) {
_m.Called(certificate)
}
// OnNewTimeoutCertificateDiscovered provides a mock function with given fields: certificate
func (_m *TimeoutCollectorConsumer[VoteT]) OnNewTimeoutCertificateDiscovered(certificate models.TimeoutCertificate) {
_m.Called(certificate)
}
// OnPartialTimeoutCertificateCreated provides a mock function with given fields: rank, newestQC, lastRankTC
func (_m *TimeoutCollectorConsumer[VoteT]) OnPartialTimeoutCertificateCreated(rank uint64, newestQC models.QuorumCertificate, lastRankTC models.TimeoutCertificate) {
_m.Called(rank, newestQC, lastRankTC)
}
// OnTimeoutCertificateConstructedFromTimeouts provides a mock function with given fields: certificate
func (_m *TimeoutCollectorConsumer[VoteT]) OnTimeoutCertificateConstructedFromTimeouts(certificate models.TimeoutCertificate) {
_m.Called(certificate)
}
// OnTimeoutProcessed provides a mock function with given fields: timeout
func (_m *TimeoutCollectorConsumer[VoteT]) OnTimeoutProcessed(timeout *models.TimeoutState[VoteT]) {
_m.Called(timeout)
}
// NewTimeoutCollectorConsumer creates a new instance of TimeoutCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutCollectorConsumer[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutCollectorConsumer[VoteT] {
mock := &TimeoutCollectorConsumer[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,59 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutCollectorFactory is an autogenerated mock type for the TimeoutCollectorFactory type
type TimeoutCollectorFactory[VoteT models.Unique] struct {
mock.Mock
}
// Create provides a mock function with given fields: rank
func (_m *TimeoutCollectorFactory[VoteT]) Create(rank uint64) (consensus.TimeoutCollector[VoteT], error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for Create")
}
var r0 consensus.TimeoutCollector[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutCollector[VoteT], error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutCollector[VoteT]); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(consensus.TimeoutCollector[VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewTimeoutCollectorFactory creates a new instance of TimeoutCollectorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutCollectorFactory[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutCollectorFactory[VoteT] {
mock := &TimeoutCollectorFactory[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,71 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutCollectors is an autogenerated mock type for the TimeoutCollectors type
type TimeoutCollectors[VoteT models.Unique] struct {
mock.Mock
}
// GetOrCreateCollector provides a mock function with given fields: rank
func (_m *TimeoutCollectors[VoteT]) GetOrCreateCollector(rank uint64) (consensus.TimeoutCollector[VoteT], bool, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for GetOrCreateCollector")
}
var r0 consensus.TimeoutCollector[VoteT]
var r1 bool
var r2 error
if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutCollector[VoteT], bool, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutCollector[VoteT]); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(consensus.TimeoutCollector[VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64) bool); ok {
r1 = rf(rank)
} else {
r1 = ret.Get(1).(bool)
}
if rf, ok := ret.Get(2).(func(uint64) error); ok {
r2 = rf(rank)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// PruneUpToRank provides a mock function with given fields: lowestRetainedRank
func (_m *TimeoutCollectors[VoteT]) PruneUpToRank(lowestRetainedRank uint64) {
_m.Called(lowestRetainedRank)
}
// NewTimeoutCollectors creates a new instance of TimeoutCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutCollectors[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutCollectors[VoteT] {
mock := &TimeoutCollectors[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
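
Hypothetical; `GetOrCreateCollector` has three returns, and the middle `bool` (whether the collector was just created) must be a real bool, since the generated body asserts `ret.Get(1).(bool)` unconditionally.

```go
package mocks_test

import (
	"testing"

	consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// collectorsWith hands back the supplied collector for rank 12 and
// reports it as freshly created.
func collectorsWith[VoteT models.Unique](
	t *testing.T, c consensus.TimeoutCollector[VoteT],
) *mocks.TimeoutCollectors[VoteT] {
	tcs := mocks.NewTimeoutCollectors[VoteT](t)
	tcs.On("GetOrCreateCollector", uint64(12)).Return(c, true, nil)
	return tcs
}
```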


@@ -0,0 +1,45 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutProcessor is an autogenerated mock type for the TimeoutProcessor type
type TimeoutProcessor[VoteT models.Unique] struct {
mock.Mock
}
// Process provides a mock function with given fields: timeout
func (_m *TimeoutProcessor[VoteT]) Process(timeout *models.TimeoutState[VoteT]) error {
ret := _m.Called(timeout)
if len(ret) == 0 {
panic("no return value specified for Process")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.TimeoutState[VoteT]) error); ok {
r0 = rf(timeout)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewTimeoutProcessor creates a new instance of TimeoutProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutProcessor[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutProcessor[VoteT] {
mock := &TimeoutProcessor[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,59 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutProcessorFactory is an autogenerated mock type for the TimeoutProcessorFactory type
type TimeoutProcessorFactory[VoteT models.Unique] struct {
mock.Mock
}
// Create provides a mock function with given fields: rank
func (_m *TimeoutProcessorFactory[VoteT]) Create(rank uint64) (consensus.TimeoutProcessor[VoteT], error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for Create")
}
var r0 consensus.TimeoutProcessor[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(uint64) (consensus.TimeoutProcessor[VoteT], error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) consensus.TimeoutProcessor[VoteT]); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(consensus.TimeoutProcessor[VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64) error); ok {
r1 = rf(rank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewTimeoutProcessorFactory creates a new instance of TimeoutProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutProcessorFactory[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutProcessorFactory[VoteT] {
mock := &TimeoutProcessorFactory[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,132 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// TimeoutSignatureAggregator is an autogenerated mock type for the TimeoutSignatureAggregator type
type TimeoutSignatureAggregator struct {
mock.Mock
}
// Aggregate provides a mock function with no fields
func (_m *TimeoutSignatureAggregator) Aggregate() ([]consensus.TimeoutSignerInfo, models.AggregatedSignature, error) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Aggregate")
}
var r0 []consensus.TimeoutSignerInfo
var r1 models.AggregatedSignature
var r2 error
if rf, ok := ret.Get(0).(func() ([]consensus.TimeoutSignerInfo, models.AggregatedSignature, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []consensus.TimeoutSignerInfo); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]consensus.TimeoutSignerInfo)
}
}
if rf, ok := ret.Get(1).(func() models.AggregatedSignature); ok {
r1 = rf()
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(models.AggregatedSignature)
}
}
if rf, ok := ret.Get(2).(func() error); ok {
r2 = rf()
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// Rank provides a mock function with no fields
func (_m *TimeoutSignatureAggregator) Rank() uint64 {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Rank")
}
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
return r0
}
// TotalWeight provides a mock function with no fields
func (_m *TimeoutSignatureAggregator) TotalWeight() uint64 {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for TotalWeight")
}
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
return r0
}
// VerifyAndAdd provides a mock function with given fields: signerID, sig, newestQCRank
func (_m *TimeoutSignatureAggregator) VerifyAndAdd(signerID models.Identity, sig []byte, newestQCRank uint64) (uint64, error) {
ret := _m.Called(signerID, sig, newestQCRank)
if len(ret) == 0 {
panic("no return value specified for VerifyAndAdd")
}
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func(models.Identity, []byte, uint64) (uint64, error)); ok {
return rf(signerID, sig, newestQCRank)
}
if rf, ok := ret.Get(0).(func(models.Identity, []byte, uint64) uint64); ok {
r0 = rf(signerID, sig, newestQCRank)
} else {
r0 = ret.Get(0).(uint64)
}
if rf, ok := ret.Get(1).(func(models.Identity, []byte, uint64) error); ok {
r1 = rf(signerID, sig, newestQCRank)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewTimeoutSignatureAggregator creates a new instance of TimeoutSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTimeoutSignatureAggregator(t interface {
mock.TestingT
Cleanup(func())
}) *TimeoutSignatureAggregator {
mock := &TimeoutSignatureAggregator{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
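
Hypothetical; `Run` is the usual testify hook for observing arguments, here capturing each signer passed to `VerifyAndAdd`.

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestTimeoutSignatureAggregatorMockSketch(t *testing.T) {
	agg := mocks.NewTimeoutSignatureAggregator(t)

	var seen []models.Identity
	agg.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			seen = append(seen, args.Get(0).(models.Identity))
		}).
		Return(uint64(1), nil) // accumulated weight, no error

	var signer models.Identity
	if _, err := agg.VerifyAndAdd(signer, []byte("sig"), 3); err != nil {
		t.Fatal(err)
	}
	if len(seen) != 1 {
		t.Fatal("signer was not captured")
	}
}
```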


@@ -0,0 +1,34 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import mock "github.com/stretchr/testify/mock"
// TraceLogger is an autogenerated mock type for the TraceLogger type
type TraceLogger struct {
mock.Mock
}
// Error provides a mock function with given fields: message, err
func (_m *TraceLogger) Error(message string, err error) {
_m.Called(message, err)
}
// Trace provides a mock function with given fields: message
func (_m *TraceLogger) Trace(message string) {
_m.Called(message)
}
// NewTraceLogger creates a new instance of TraceLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTraceLogger(t interface {
mock.TestingT
Cleanup(func())
}) *TraceLogger {
mock := &TraceLogger{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
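
Hypothetical; `Once` pins an exact call count and `Maybe` marks an expectation optional, so an un-hit `Error` does not fail the `AssertExpectations` cleanup.

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
)

func TestTraceLoggerMockSketch(t *testing.T) {
	l := mocks.NewTraceLogger(t)
	l.On("Trace", "entering rank 5").Once()
	l.On("Error", mock.Anything, mock.Anything).Maybe()

	l.Trace("entering rank 5")
}
```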


@@ -0,0 +1,111 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Validator is an autogenerated mock type for the Validator type
type Validator[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// ValidateProposal provides a mock function with given fields: proposal
func (_m *Validator[StateT, VoteT]) ValidateProposal(proposal *models.SignedProposal[StateT, VoteT]) error {
ret := _m.Called(proposal)
if len(ret) == 0 {
panic("no return value specified for ValidateProposal")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok {
r0 = rf(proposal)
} else {
r0 = ret.Error(0)
}
return r0
}
// ValidateQuorumCertificate provides a mock function with given fields: qc
func (_m *Validator[StateT, VoteT]) ValidateQuorumCertificate(qc models.QuorumCertificate) error {
ret := _m.Called(qc)
if len(ret) == 0 {
panic("no return value specified for ValidateQuorumCertificate")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok {
r0 = rf(qc)
} else {
r0 = ret.Error(0)
}
return r0
}
// ValidateTimeoutCertificate provides a mock function with given fields: tc
func (_m *Validator[StateT, VoteT]) ValidateTimeoutCertificate(tc models.TimeoutCertificate) error {
ret := _m.Called(tc)
if len(ret) == 0 {
panic("no return value specified for ValidateTimeoutCertificate")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok {
r0 = rf(tc)
} else {
r0 = ret.Error(0)
}
return r0
}
// ValidateVote provides a mock function with given fields: vote
func (_m *Validator[StateT, VoteT]) ValidateVote(vote *VoteT) (*models.WeightedIdentity, error) {
ret := _m.Called(vote)
if len(ret) == 0 {
panic("no return value specified for ValidateVote")
}
var r0 *models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(*VoteT) (*models.WeightedIdentity, error)); ok {
return rf(vote)
}
if rf, ok := ret.Get(0).(func(*VoteT) *models.WeightedIdentity); ok {
r0 = rf(vote)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.WeightedIdentity)
}
}
if rf, ok := ret.Get(1).(func(*VoteT) error); ok {
r1 = rf(vote)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewValidator creates a new instance of Validator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewValidator[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *Validator[StateT, VoteT] {
mock := &Validator[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
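
Hypothetical; a validator stubbed to reject everything. `ValidateVote` gets a typed nil `*models.WeightedIdentity` so the generated nil guard type-asserts cleanly.

```go
package mocks_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/mock"

	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

var errInvalid = errors.New("invalid") // hypothetical sentinel

// rejectingValidator fails every validation with errInvalid.
func rejectingValidator[StateT models.Unique, VoteT models.Unique](
	t *testing.T,
) *mocks.Validator[StateT, VoteT] {
	v := mocks.NewValidator[StateT, VoteT](t)
	v.On("ValidateProposal", mock.Anything).Return(errInvalid)
	v.On("ValidateQuorumCertificate", mock.Anything).Return(errInvalid)
	v.On("ValidateVote", mock.Anything).
		Return((*models.WeightedIdentity)(nil), errInvalid)
	return v
}
```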


@@ -0,0 +1,81 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// Verifier is an autogenerated mock type for the Verifier type
type Verifier[VoteT models.Unique] struct {
mock.Mock
}
// VerifyQuorumCertificate provides a mock function with given fields: quorumCertificate
func (_m *Verifier[VoteT]) VerifyQuorumCertificate(quorumCertificate models.QuorumCertificate) error {
ret := _m.Called(quorumCertificate)
if len(ret) == 0 {
panic("no return value specified for VerifyQuorumCertificate")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.QuorumCertificate) error); ok {
r0 = rf(quorumCertificate)
} else {
r0 = ret.Error(0)
}
return r0
}
// VerifyTimeoutCertificate provides a mock function with given fields: timeoutCertificate
func (_m *Verifier[VoteT]) VerifyTimeoutCertificate(timeoutCertificate models.TimeoutCertificate) error {
ret := _m.Called(timeoutCertificate)
if len(ret) == 0 {
panic("no return value specified for VerifyTimeoutCertificate")
}
var r0 error
if rf, ok := ret.Get(0).(func(models.TimeoutCertificate) error); ok {
r0 = rf(timeoutCertificate)
} else {
r0 = ret.Error(0)
}
return r0
}
// VerifyVote provides a mock function with given fields: vote
func (_m *Verifier[VoteT]) VerifyVote(vote *VoteT) error {
ret := _m.Called(vote)
if len(ret) == 0 {
panic("no return value specified for VerifyVote")
}
var r0 error
if rf, ok := ret.Get(0).(func(*VoteT) error); ok {
r0 = rf(vote)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewVerifier creates a new instance of Verifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewVerifier[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *Verifier[VoteT] {
mock := &Verifier[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,85 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VerifyingVoteProcessor is an autogenerated mock type for the VerifyingVoteProcessor type
type VerifyingVoteProcessor[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// Process provides a mock function with given fields: vote
func (_m *VerifyingVoteProcessor[StateT, VoteT]) Process(vote *VoteT) error {
ret := _m.Called(vote)
if len(ret) == 0 {
panic("no return value specified for Process")
}
var r0 error
if rf, ok := ret.Get(0).(func(*VoteT) error); ok {
r0 = rf(vote)
} else {
r0 = ret.Error(0)
}
return r0
}
// State provides a mock function with no fields
func (_m *VerifyingVoteProcessor[StateT, VoteT]) State() *models.State[StateT] {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for State")
}
var r0 *models.State[StateT]
if rf, ok := ret.Get(0).(func() *models.State[StateT]); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.State[StateT])
}
}
return r0
}
// Status provides a mock function with no fields
func (_m *VerifyingVoteProcessor[StateT, VoteT]) Status() consensus.VoteCollectorStatus {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Status")
}
var r0 consensus.VoteCollectorStatus
if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(consensus.VoteCollectorStatus)
}
return r0
}
// NewVerifyingVoteProcessor creates a new instance of VerifyingVoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVerifyingVoteProcessor[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VerifyingVoteProcessor[StateT, VoteT] {
mock := &VerifyingVoteProcessor[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
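
Because Status falls through to ret.Get(0).(consensus.VoteCollectorStatus), the stubbed value must have exactly that type or the generated assertion panics. A sketch, adding an import of the consensus package and assuming VoteCollectorStatus is a comparable enum-like type:

func exampleProcessorStatus[S, V models.Unique](t *testing.T, status consensus.VoteCollectorStatus) {
	p := mocks.NewVerifyingVoteProcessor[S, V](t)
	p.On("Status").Return(status) // must be a consensus.VoteCollectorStatus
	if got := p.Status(); got != status {
		t.Fatalf("got %v, want %v", got, status)
	}
}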

View File

@ -0,0 +1,52 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteAggregationConsumer is an autogenerated mock type for the VoteAggregationConsumer type
type VoteAggregationConsumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnDoubleVotingDetected provides a mock function with given fields: _a0, _a1
func (_m *VoteAggregationConsumer[StateT, VoteT]) OnDoubleVotingDetected(_a0 *VoteT, _a1 *VoteT) {
_m.Called(_a0, _a1)
}
// OnInvalidVoteDetected provides a mock function with given fields: err
func (_m *VoteAggregationConsumer[StateT, VoteT]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) {
_m.Called(err)
}
// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0
func (_m *VoteAggregationConsumer[StateT, VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) {
_m.Called(_a0)
}
// OnVoteForInvalidStateDetected provides a mock function with given fields: vote, invalidProposal
func (_m *VoteAggregationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(vote *VoteT, invalidProposal *models.SignedProposal[StateT, VoteT]) {
_m.Called(vote, invalidProposal)
}
// OnVoteProcessed provides a mock function with given fields: vote
func (_m *VoteAggregationConsumer[StateT, VoteT]) OnVoteProcessed(vote *VoteT) {
_m.Called(vote)
}
// NewVoteAggregationConsumer creates a new instance of VoteAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteAggregationConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteAggregationConsumer[StateT, VoteT] {
mock := &VoteAggregationConsumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
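
The consumer's methods return nothing, so stubs chain an empty Return(); AssertCalled then verifies the notification actually happened. A sketch:

func exampleVoteProcessed[S, V models.Unique](t *testing.T, vote *V) {
	c := mocks.NewVoteAggregationConsumer[S, V](t)
	c.On("OnVoteProcessed", vote).Return() // void method: no return values
	c.OnVoteProcessed(vote)
	c.AssertCalled(t, "OnVoteProcessed", vote)
}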

View File

@ -0,0 +1,42 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteAggregationViolationConsumer is an autogenerated mock type for the VoteAggregationViolationConsumer type
type VoteAggregationViolationConsumer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// OnDoubleVotingDetected provides a mock function with given fields: _a0, _a1
func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnDoubleVotingDetected(_a0 *VoteT, _a1 *VoteT) {
_m.Called(_a0, _a1)
}
// OnInvalidVoteDetected provides a mock function with given fields: err
func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnInvalidVoteDetected(err models.InvalidVoteError[VoteT]) {
_m.Called(err)
}
// OnVoteForInvalidStateDetected provides a mock function with given fields: vote, invalidProposal
func (_m *VoteAggregationViolationConsumer[StateT, VoteT]) OnVoteForInvalidStateDetected(vote *VoteT, invalidProposal *models.SignedProposal[StateT, VoteT]) {
_m.Called(vote, invalidProposal)
}
// NewVoteAggregationViolationConsumer creates a new instance of VoteAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteAggregationViolationConsumer[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteAggregationViolationConsumer[StateT, VoteT] {
mock := &VoteAggregationViolationConsumer[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
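
This type mocks the violation-only subset of the consumer above; a sketch of expecting a double-vote report, where first and second are any two vote pointers the test constructs:

func exampleDoubleVote[S, V models.Unique](t *testing.T, first, second *V) {
	c := mocks.NewVoteAggregationViolationConsumer[S, V](t)
	c.On("OnDoubleVotingDetected", first, second).Return()
	c.OnDoubleVotingDetected(first, second)
}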

View File

@ -0,0 +1,80 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteAggregator is an autogenerated mock type for the VoteAggregator type
type VoteAggregator[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// AddState provides a mock function with given fields: state
func (_m *VoteAggregator[StateT, VoteT]) AddState(state *models.SignedProposal[StateT, VoteT]) {
_m.Called(state)
}
// AddVote provides a mock function with given fields: vote
func (_m *VoteAggregator[StateT, VoteT]) AddVote(vote *VoteT) {
_m.Called(vote)
}
// InvalidState provides a mock function with given fields: state
func (_m *VoteAggregator[StateT, VoteT]) InvalidState(state *models.SignedProposal[StateT, VoteT]) error {
ret := _m.Called(state)
if len(ret) == 0 {
panic("no return value specified for InvalidState")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok {
r0 = rf(state)
} else {
r0 = ret.Error(0)
}
return r0
}
// PruneUpToRank provides a mock function with given fields: rank
func (_m *VoteAggregator[StateT, VoteT]) PruneUpToRank(rank uint64) {
_m.Called(rank)
}
// Start provides a mock function with given fields: ctx
func (_m *VoteAggregator[StateT, VoteT]) Start(ctx context.Context) error {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewVoteAggregator creates a new instance of VoteAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteAggregator[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteAggregator[StateT, VoteT] {
mock := &VoteAggregator[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
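
Start takes a plain context.Context here, so mock.Anything is the easiest matcher. A sketch, adding context and github.com/stretchr/testify/mock to the imports assumed earlier:

func exampleAggregator[S, V models.Unique](t *testing.T, vote *V) {
	agg := mocks.NewVoteAggregator[S, V](t)
	agg.On("Start", mock.Anything).Return(nil) // matches any context.Context
	agg.On("AddVote", vote).Return()
	if err := agg.Start(context.Background()); err != nil {
		t.Fatal(err)
	}
	agg.AddVote(vote)
}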

View File

@ -0,0 +1,106 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteCollector is an autogenerated mock type for the VoteCollector type
type VoteCollector[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// AddVote provides a mock function with given fields: vote
func (_m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error {
ret := _m.Called(vote)
if len(ret) == 0 {
panic("no return value specified for AddVote")
}
var r0 error
if rf, ok := ret.Get(0).(func(*VoteT) error); ok {
r0 = rf(vote)
} else {
r0 = ret.Error(0)
}
return r0
}
// ProcessState provides a mock function with given fields: state
func (_m *VoteCollector[StateT, VoteT]) ProcessState(state *models.SignedProposal[StateT, VoteT]) error {
ret := _m.Called(state)
if len(ret) == 0 {
panic("no return value specified for ProcessState")
}
var r0 error
if rf, ok := ret.Get(0).(func(*models.SignedProposal[StateT, VoteT]) error); ok {
r0 = rf(state)
} else {
r0 = ret.Error(0)
}
return r0
}
// Rank provides a mock function with no fields
func (_m *VoteCollector[StateT, VoteT]) Rank() uint64 {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Rank")
}
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}
return r0
}
// RegisterVoteConsumer provides a mock function with given fields: consumer
func (_m *VoteCollector[StateT, VoteT]) RegisterVoteConsumer(consumer consensus.VoteConsumer[VoteT]) {
_m.Called(consumer)
}
// Status provides a mock function with no fields
func (_m *VoteCollector[StateT, VoteT]) Status() consensus.VoteCollectorStatus {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Status")
}
var r0 consensus.VoteCollectorStatus
if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(consensus.VoteCollectorStatus)
}
return r0
}
// NewVoteCollector creates a new instance of VoteCollector. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteCollector[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteCollector[StateT, VoteT] {
mock := &VoteCollector[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
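
As with Status above, Rank falls back to a bare type assertion, so the stub must return a uint64, not an untyped int. A sketch:

func exampleCollector[S, V models.Unique](t *testing.T, vote *V) {
	col := mocks.NewVoteCollector[S, V](t)
	col.On("Rank").Return(uint64(42)) // must be uint64, or the assertion panics
	col.On("AddVote", vote).Return(nil)
	if col.Rank() != 42 {
		t.Fatal("unexpected rank")
	}
	if err := col.AddVote(vote); err != nil {
		t.Fatal(err)
	}
}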

View File

@ -0,0 +1,37 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteCollectorConsumer is an autogenerated mock type for the VoteCollectorConsumer type
type VoteCollectorConsumer[VoteT models.Unique] struct {
mock.Mock
}
// OnQuorumCertificateConstructedFromVotes provides a mock function with given fields: _a0
func (_m *VoteCollectorConsumer[VoteT]) OnQuorumCertificateConstructedFromVotes(_a0 models.QuorumCertificate) {
_m.Called(_a0)
}
// OnVoteProcessed provides a mock function with given fields: vote
func (_m *VoteCollectorConsumer[VoteT]) OnVoteProcessed(vote *VoteT) {
_m.Called(vote)
}
// NewVoteCollectorConsumer creates a new instance of VoteCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteCollectorConsumer[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteCollectorConsumer[VoteT] {
mock := &VoteCollectorConsumer[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
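
A sketch of the narrower collector-level consumer, verifying that the quorum-certificate notification fires:

func exampleQCNotification[V models.Unique](t *testing.T, qc models.QuorumCertificate) {
	c := mocks.NewVoteCollectorConsumer[V](t)
	c.On("OnQuorumCertificateConstructedFromVotes", qc).Return()
	c.OnQuorumCertificateConstructedFromVotes(qc)
	c.AssertCalled(t, "OnQuorumCertificateConstructedFromVotes", qc)
}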

View File

@ -0,0 +1,131 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/lifecycle"
mock "github.com/stretchr/testify/mock"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteCollectors is an autogenerated mock type for the VoteCollectors type
type VoteCollectors[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}
// Done provides a mock function with no fields
func (_m *VoteCollectors[StateT, VoteT]) Done() <-chan struct{} {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Done")
}
var r0 <-chan struct{}
if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan struct{})
}
}
return r0
}
// GetOrCreateCollector provides a mock function with given fields: rank
func (_m *VoteCollectors[StateT, VoteT]) GetOrCreateCollector(rank uint64) (consensus.VoteCollector[StateT, VoteT], bool, error) {
ret := _m.Called(rank)
if len(ret) == 0 {
panic("no return value specified for GetOrCreateCollector")
}
var r0 consensus.VoteCollector[StateT, VoteT]
var r1 bool
var r2 error
if rf, ok := ret.Get(0).(func(uint64) (consensus.VoteCollector[StateT, VoteT], bool, error)); ok {
return rf(rank)
}
if rf, ok := ret.Get(0).(func(uint64) consensus.VoteCollector[StateT, VoteT]); ok {
r0 = rf(rank)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(consensus.VoteCollector[StateT, VoteT])
}
}
if rf, ok := ret.Get(1).(func(uint64) bool); ok {
r1 = rf(rank)
} else {
r1 = ret.Get(1).(bool)
}
if rf, ok := ret.Get(2).(func(uint64) error); ok {
r2 = rf(rank)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// PruneUpToRank provides a mock function with given fields: lowestRetainedRank
func (_m *VoteCollectors[StateT, VoteT]) PruneUpToRank(lowestRetainedRank uint64) {
_m.Called(lowestRetainedRank)
}
// Ready provides a mock function with no fields
func (_m *VoteCollectors[StateT, VoteT]) Ready() <-chan struct{} {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Ready")
}
var r0 <-chan struct{}
if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan struct{})
}
}
return r0
}
// Start provides a mock function with given fields: ctx
func (_m *VoteCollectors[StateT, VoteT]) Start(ctx lifecycle.SignalerContext) error {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if rf, ok := ret.Get(0).(func(lifecycle.SignalerContext) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewVoteCollectors creates a new instance of VoteCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteCollectors[StateT models.Unique, VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteCollectors[StateT, VoteT] {
mock := &VoteCollectors[StateT, VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
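
GetOrCreateCollector returns three values, and the generated fallback asserts each one separately, so the stub must supply all three. A VoteCollector mock can stand in for the interface value; a sketch:

func exampleGetOrCreate[S, V models.Unique](t *testing.T) {
	vcs := mocks.NewVoteCollectors[S, V](t)
	col := mocks.NewVoteCollector[S, V](t) // satisfies consensus.VoteCollector
	vcs.On("GetOrCreateCollector", uint64(7)).Return(col, true, nil)
	got, created, err := vcs.GetOrCreateCollector(7)
	if err != nil || !created || got == nil {
		t.Fatalf("unexpected result: %v %v %v", got, created, err)
	}
}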

View File

@ -0,0 +1,65 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteProcessor is an autogenerated mock type for the VoteProcessor type
type VoteProcessor[VoteT models.Unique] struct {
mock.Mock
}
// Process provides a mock function with given fields: vote
func (_m *VoteProcessor[VoteT]) Process(vote *VoteT) error {
ret := _m.Called(vote)
if len(ret) == 0 {
panic("no return value specified for Process")
}
var r0 error
if rf, ok := ret.Get(0).(func(*VoteT) error); ok {
r0 = rf(vote)
} else {
r0 = ret.Error(0)
}
return r0
}
// Status provides a mock function with no fields
func (_m *VoteProcessor[VoteT]) Status() consensus.VoteCollectorStatus {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Status")
}
var r0 consensus.VoteCollectorStatus
if rf, ok := ret.Get(0).(func() consensus.VoteCollectorStatus); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(consensus.VoteCollectorStatus)
}
return r0
}
// NewVoteProcessor creates a new instance of VoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteProcessor[VoteT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteProcessor[VoteT] {
mock := &VoteProcessor[VoteT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
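
Error returns pass through ret.Error(0), so a sentinel error survives the round trip intact. A sketch, adding errors to the assumed imports; the sentinel is hypothetical:

func exampleProcess[V models.Unique](t *testing.T, vote *V) {
	p := mocks.NewVoteProcessor[V](t)
	wantErr := errors.New("stale vote") // hypothetical sentinel for the test
	p.On("Process", vote).Return(wantErr)
	if err := p.Process(vote); !errors.Is(err, wantErr) {
		t.Fatalf("got %v, want %v", err, wantErr)
	}
}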

View File

@ -0,0 +1,59 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
consensus "source.quilibrium.com/quilibrium/monorepo/consensus"
models "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)
// VoteProcessorFactory is an autogenerated mock type for the VoteProcessorFactory type
type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique] struct {
mock.Mock
}
// Create provides a mock function with given fields: tracer, proposal, dsTag, aggregator
func (_m *VoteProcessorFactory[StateT, VoteT, PeerIDT]) Create(tracer consensus.TraceLogger, filter []byte, proposal *models.SignedProposal[StateT, VoteT], dsTag []byte, aggregator consensus.SignatureAggregator, voter consensus.VotingProvider[StateT, VoteT, PeerIDT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) {
ret := _m.Called(tracer, filter, proposal, dsTag, aggregator, voter)
if len(ret) == 0 {
panic("no return value specified for Create")
}
var r0 consensus.VerifyingVoteProcessor[StateT, VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(consensus.TraceLogger, []byte, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator, consensus.VotingProvider[StateT, VoteT, PeerIDT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error)); ok {
return rf(tracer, filter, proposal, dsTag, aggregator, voter)
}
if rf, ok := ret.Get(0).(func(consensus.TraceLogger, []byte, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator, consensus.VotingProvider[StateT, VoteT, PeerIDT]) consensus.VerifyingVoteProcessor[StateT, VoteT]); ok {
r0 = rf(tracer, filter, proposal, dsTag, aggregator, voter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(consensus.VerifyingVoteProcessor[StateT, VoteT])
}
}
if rf, ok := ret.Get(1).(func(consensus.TraceLogger, []byte, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator, consensus.VotingProvider[StateT, VoteT, PeerIDT]) error); ok {
r1 = rf(tracer, filter, proposal, dsTag, aggregator, voter)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewVoteProcessorFactory creates a new instance of VoteProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewVoteProcessorFactory[StateT models.Unique, VoteT models.Unique, PeerIDT models.Unique](t interface {
mock.TestingT
Cleanup(func())
}) *VoteProcessorFactory[StateT, VoteT, PeerIDT] {
mock := &VoteProcessorFactory[StateT, VoteT, PeerIDT]{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
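
Create takes six arguments, so the expectation needs six matchers in the same order: tracer, filter, proposal, dsTag, aggregator, voter. A sketch that round-trips a VerifyingVoteProcessor mock through the factory; nil is a legal placeholder for each parameter since all are interfaces, slices, or pointers:

func exampleFactory[S, V, P models.Unique](t *testing.T) {
	f := mocks.NewVoteProcessorFactory[S, V, P](t)
	vp := mocks.NewVerifyingVoteProcessor[S, V](t)
	f.On("Create", mock.Anything, mock.Anything, mock.Anything,
		mock.Anything, mock.Anything, mock.Anything).Return(vp, nil)
	got, err := f.Create(nil, nil, nil, nil, nil, nil)
	if err != nil || got == nil {
		t.Fatal("factory stub did not round-trip")
	}
}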

Some files were not shown because too many files have changed in this diff.