mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git (synced 2026-02-21 10:27:26 +08:00)

commit a3d3a8d795 (parent d71b0538f2): bulk of tests
@@ -132,7 +132,7 @@ type DynamicCommittee interface {
 	IdentityByState(
 		stateID models.Identity,
 		participantID models.Identity,
-	) (*models.WeightedIdentity, error)
+	) (models.WeightedIdentity, error)
 }

 // StateSignerDecoder defines how to convert the ParentSignerIndices field
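The return type changes from a pointer to a value, which removes the nil-pointer case from the happy path: callers branch on the error alone and never dereference a possibly-nil result. A minimal sketch of the difference (the types below are hypothetical stand-ins, not the repository's models package):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for models.Identity / models.WeightedIdentity.
type Identity string

type WeightedIdentity struct {
	ID     Identity
	Weight uint64
}

var committee = map[Identity]WeightedIdentity{
	"a": {ID: "a", Weight: 100},
}

// Returning the struct by value: on error the zero value is returned,
// so callers check err alone instead of err plus a nil pointer.
func IdentityByState(stateID, participantID Identity) (WeightedIdentity, error) {
	wi, ok := committee[participantID]
	if !ok {
		return WeightedIdentity{}, errors.New("unknown participant")
	}
	return wi, nil
}

func main() {
	wi, err := IdentityByState("state-1", "a")
	if err != nil {
		panic(err)
	}
	fmt.Println(wi.Weight) // 100
}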
@@ -146,7 +146,7 @@ type StateSignerDecoder[StateT models.Unique] interface {
 	// parent state. Consequently, the returned IdentifierList contains the
 	// consensus participants that signed the parent state.
 	// Expected Error returns during normal operations:
-	//  - signature.InvalidSignerIndicesError if signer indices included in the
+	//  - consensus.InvalidSignerIndicesError if signer indices included in the
 	//    header do not encode a valid subset of the consensus committee
 	DecodeSignerIDs(
 		state *models.State[StateT],
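The DecodeSignerIDs contract maps compact signer indices back to committee identities and must reject encodings that do not describe a valid subset of the committee. A self-contained sketch of one plausible encoding (a bitvector over the committee; the helper and error here are illustrative, not the repository's implementation):

package main

import (
	"errors"
	"fmt"
)

var ErrInvalidSignerIndices = errors.New("invalid signer indices")

// decodeSignerIDs interprets a bitvector over the committee: bit i set means
// committee member i signed the parent state. Indices that do not encode a
// valid subset of the committee yield ErrInvalidSignerIndices.
func decodeSignerIDs(indices []byte, committee []string) ([]string, error) {
	if len(indices)*8 < len(committee) {
		return nil, fmt.Errorf("%w: too few index bits", ErrInvalidSignerIndices)
	}
	signers := make([]string, 0, len(committee))
	for i, id := range committee {
		if indices[i/8]&(1<<(7-i%8)) != 0 {
			signers = append(signers, id)
		}
	}
	// padding bits beyond the committee size must be zero
	for i := len(committee); i < len(indices)*8; i++ {
		if indices[i/8]&(1<<(7-i%8)) != 0 {
			return nil, fmt.Errorf("%w: non-zero padding bit %d", ErrInvalidSignerIndices, i)
		}
	}
	return signers, nil
}

func main() {
	committee := []string{"a", "b", "c"}
	ids, err := decodeSignerIDs([]byte{0b10100000}, committee)
	fmt.Println(ids, err) // [a c] <nil>
}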
@@ -28,7 +28,7 @@ type Forks[StateT models.Unique] interface {
 	// GetStatesForRank returns all known states for the given rank
 	GetStatesForRank(rank uint64) []*models.State[StateT]

-	// GetState returns (*model.State, true) if the state with the specified
+	// GetState returns (*models.State[*helper.TestState], true) if the state with the specified
 	// id was found and (nil, false) otherwise.
 	GetState(stateID models.Identity) (*models.State[StateT], bool)

@@ -36,7 +36,7 @@ type WeightedSignatureAggregator interface {
 	// TotalWeight returns the total weight presented by the collected signatures.
 	TotalWeight() uint64

-	// Aggregate aggregates the signatures and returns the aggregated signature.
+	// Aggregate aggregates the signatures and returns the aggregated consensus.
 	// The function performs a final verification and errors if the aggregated
 	// signature is invalid. This is required for the function safety since
 	// `TrustedAdd` allows adding invalid signatures.

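The comment block above pins down the key subtlety of the aggregator contract: `TrustedAdd` admits unverified signatures, so `Aggregate` must verify the final result before returning it. A runnable sketch of that contract (the "aggregation" and "verification" below are placeholders, not real cryptography or the repository's implementation):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type weightedAggregator struct {
	mu     sync.Mutex
	weight uint64
	sigs   [][]byte
	verify func(sig []byte) bool // stand-in for cryptographic verification
}

// TrustedAdd stores a signature WITHOUT verifying it; the caller vouches for it.
func (a *weightedAggregator) TrustedAdd(sig []byte, weight uint64) uint64 {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.sigs = append(a.sigs, sig)
	a.weight += weight
	return a.weight
}

func (a *weightedAggregator) TotalWeight() uint64 {
	a.mu.Lock()
	defer a.mu.Unlock()
	return a.weight
}

// Aggregate performs the final verification: because TrustedAdd admits
// invalid signatures, the aggregate must be checked before it is returned.
func (a *weightedAggregator) Aggregate() ([]byte, error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	agg := []byte{}
	for _, s := range a.sigs {
		agg = append(agg, s...) // placeholder "aggregation": concatenation
	}
	if !a.verify(agg) {
		return nil, errors.New("aggregated signature is invalid")
	}
	return agg, nil
}

func main() {
	agg := &weightedAggregator{verify: func(sig []byte) bool { return len(sig) > 0 }}
	agg.TrustedAdd([]byte{0x01}, 100)
	fmt.Println(agg.TotalWeight()) // 100
	sig, err := agg.Aggregate()
	fmt.Println(sig, err) // [1] <nil>
}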
@@ -13,7 +13,7 @@ type Signer[StateT models.Unique, VoteT models.Unique] interface {
 	// CreateTimeout creates a timeout for given rank. No errors return are
 	// expected during normal operations(incl presence of byz. actors).
 	CreateTimeout(
-		curView uint64,
+		curRank uint64,
 		newestQC models.QuorumCertificate,
 		previousRankTimeoutCert models.TimeoutCertificate,
 	) (*models.TimeoutState[VoteT], error)

@@ -53,7 +53,7 @@ type TimeoutCollector[VoteT models.Unique] interface {

 // TimeoutProcessor ingests Timeout States for a particular rank. It
 // implements the algorithms for validating TSs, orchestrates their low-level
-// aggregation and emits `OnPartialTcCreated` and `OnTcConstructedFromTimeouts`
+// aggregation and emits `OnPartialTimeoutCertificateCreated` and `OnTimeoutCertificateConstructedFromTimeouts`
 // notifications. TimeoutProcessor cannot deduplicate TSs (this should be
 // handled by the higher-level TimeoutCollector) and errors instead. Depending
 // on their implementation, a TimeoutProcessor might drop timeouts or attempt to

@@ -36,7 +36,7 @@ type Verifier[VoteT models.Unique] interface {
 	// Return values:
 	//  * nil if `sigData` is cryptographically valid
 	//  * models.InsufficientSignaturesError if `signers` is empty.
-	//  * models.InvalidFormatError if `signers`/`highQCViews` have differing
+	//  * models.InvalidFormatError if `signers`/`highQCRanks` have differing
 	//    lengths
 	//  * models.ErrInvalidSignature if a signature is invalid
 	//  * unexpected errors should be treated as symptoms of bugs or uncovered
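The documented return values follow Go's sentinel-error convention: expected failures are distinguished from bugs by error identity. A sketch of how a caller might branch (the sentinels below are hypothetical stand-ins; the real models package may define structured error types instead):

package main

import (
	"errors"
	"fmt"
)

var (
	ErrInsufficientSignatures = errors.New("insufficient signatures")
	ErrInvalidFormat          = errors.New("invalid format")
	ErrInvalidSignature       = errors.New("invalid signature")
)

func verify(signers []string, highQCRanks []uint64, sigData []byte) error {
	if len(signers) == 0 {
		return fmt.Errorf("verify: %w", ErrInsufficientSignatures)
	}
	if len(signers) != len(highQCRanks) {
		return fmt.Errorf("verify: %w", ErrInvalidFormat)
	}
	if len(sigData) == 0 {
		return fmt.Errorf("verify: %w", ErrInvalidSignature)
	}
	return nil
}

func main() {
	err := verify([]string{"a"}, nil, []byte{1})
	switch {
	case err == nil:
		fmt.Println("valid")
	case errors.Is(err, ErrInvalidFormat):
		fmt.Println("signers/highQCRanks length mismatch") // this branch fires
	case errors.Is(err, ErrInvalidSignature):
		fmt.Println("bad signature")
	default:
		fmt.Println("unexpected error, treat as bug:", err)
	}
}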
@@ -256,7 +256,7 @@ func (e *EventHandler[
 	return nil
 }

-// OnPartialTcCreated handles notification produces by the internal timeout
+// OnPartialTimeoutCertificateCreated handles notification produces by the internal timeout
 // aggregator. If the notification is for the current rank, a corresponding
 // models.TimeoutState is broadcast to the consensus committee. No errors are
 // expected during normal operation.
@@ -426,9 +426,12 @@ func (e *EventHandler[

 	// check that I am the primary for this rank
 	if e.committee.Self() != currentLeader {
 		e.tracer.Trace("not primary")
 		return nil
 	}
+
+	e.tracer.Trace("primary")
+
 	// attempt to generate proposal:
 	newestQC := e.paceMaker.LatestQuorumCertificate()
 	previousRankTimeoutCert := e.paceMaker.PriorRankTimeoutCertificate()
1120  consensus/eventhandler/event_handler_test.go  (new file)
File diff suppressed because it is too large
@@ -22,17 +22,17 @@ type queuedProposal[StateT models.Unique, VoteT models.Unique] struct {

 // EventLoop buffers all incoming events to the hotstuff EventHandler, and feeds
 // EventHandler one event at a time.
 type EventLoop[StateT models.Unique, VoteT models.Unique] struct {
-	ctx                      context.Context
-	eventHandler             consensus.EventHandler[StateT, VoteT]
-	proposals                chan queuedProposal[StateT, VoteT]
-	newestSubmittedTc        *tracker.NewestTCTracker
-	newestSubmittedQc        *tracker.NewestQCTracker
-	newestSubmittedPartialTc *tracker.NewestPartialTcTracker
-	tcSubmittedNotifier      chan struct{}
-	qcSubmittedNotifier      chan struct{}
-	partialTcCreatedNotifier chan struct{}
-	startTime                time.Time
-	tracer                   consensus.TraceLogger
+	ctx                                      context.Context
+	eventHandler                             consensus.EventHandler[StateT, VoteT]
+	proposals                                chan queuedProposal[StateT, VoteT]
+	newestSubmittedTimeoutCertificate        *tracker.NewestTCTracker
+	newestSubmittedQc                        *tracker.NewestQCTracker
+	newestSubmittedPartialTimeoutCertificate *tracker.NewestPartialTimeoutCertificateTracker
+	tcSubmittedNotifier                      chan struct{}
+	qcSubmittedNotifier                      chan struct{}
+	partialTimeoutCertificateCreatedNotifier chan struct{}
+	startTime                                time.Time
+	tracer                                   consensus.TraceLogger
 }

 var _ consensus.EventLoop[*nilUnique, *nilUnique] = (*EventLoop[*nilUnique, *nilUnique])(nil)
@@ -53,16 +53,16 @@ func NewEventLoop[StateT models.Unique, VoteT models.Unique](
 	proposals := make(chan queuedProposal[StateT, VoteT], 1000)

 	el := &EventLoop[StateT, VoteT]{
-		tracer:                   tracer,
-		eventHandler:             eventHandler,
-		proposals:                proposals,
-		tcSubmittedNotifier:      make(chan struct{}, 1),
-		qcSubmittedNotifier:      make(chan struct{}, 1),
-		partialTcCreatedNotifier: make(chan struct{}, 1),
-		newestSubmittedTc:        tracker.NewNewestTCTracker(),
-		newestSubmittedQc:        tracker.NewNewestQCTracker(),
-		newestSubmittedPartialTc: tracker.NewNewestPartialTcTracker(),
-		startTime:                startTime,
+		tracer:                                   tracer,
+		eventHandler:                             eventHandler,
+		proposals:                                proposals,
+		tcSubmittedNotifier:                      make(chan struct{}, 1),
+		qcSubmittedNotifier:                      make(chan struct{}, 1),
+		partialTimeoutCertificateCreatedNotifier: make(chan struct{}, 1),
+		newestSubmittedTimeoutCertificate:        tracker.NewNewestTCTracker(),
+		newestSubmittedQc:                        tracker.NewNewestQCTracker(),
+		newestSubmittedPartialTimeoutCertificate: tracker.NewNewestPartialTimeoutCertificateTracker(),
+		startTime:                                startTime,
 	}

 	return el, nil
@@ -71,17 +71,19 @@ func NewEventLoop[StateT models.Unique, VoteT models.Unique](
 func (el *EventLoop[StateT, VoteT]) Start(ctx context.Context) error {
 	el.ctx = ctx

-	select {
-	case <-ctx.Done():
-		return nil
-	case <-time.After(time.Until(el.startTime)):
-		el.tracer.Trace("starting event loop")
-		err := el.loop(ctx)
-		if err != nil {
-			el.tracer.Error("irrecoverable event loop error", err)
-			return err
-		}
-	}
+	go func() {
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(time.Until(el.startTime)):
+			el.tracer.Trace("starting event loop")
+			err := el.loop(ctx)
+			if err != nil {
+				el.tracer.Error("irrecoverable event loop error", err)
+				return
+			}
+		}
+	}()
+	return nil
 }

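The change above makes Start non-blocking: the select moves onto its own goroutine and Start returns immediately, while execution of the loop is still delayed until startTime. Note that time.Until yields a non-positive duration for past times, so time.After fires at once and a zero startTime means "start now". A minimal standalone sketch of the same pattern (names are illustrative, not the repository's API):

package main

import (
	"context"
	"fmt"
	"time"
)

type loop struct{ startTime time.Time }

func (l *loop) run(ctx context.Context) error {
	fmt.Println("loop running")
	<-ctx.Done()
	return nil
}

// Start returns immediately; the loop starts on its own goroutine once
// startTime is reached, unless the context is cancelled first.
func (l *loop) Start(ctx context.Context) error {
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-time.After(time.Until(l.startTime)):
			if err := l.run(ctx); err != nil {
				fmt.Println("irrecoverable loop error:", err)
			}
		}
	}()
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	l := &loop{startTime: time.Now().Add(50 * time.Millisecond)}
	_ = l.Start(ctx) // returns immediately
	time.Sleep(100 * time.Millisecond)
	cancel()
}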
@@ -102,7 +104,7 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {
 	shutdownSignaled := ctx.Done()
 	timeoutCertificates := el.tcSubmittedNotifier
 	quorumCertificates := el.qcSubmittedNotifier
-	partialTCs := el.partialTcCreatedNotifier
+	partialTCs := el.partialTimeoutCertificateCreatedNotifier

 	for {
 		// Giving timeout events the priority to be processed first.
@@ -116,12 +118,14 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {

 		// if we receive the shutdown signal, exit the loop
 		case <-shutdownSignaled:
+			el.tracer.Trace("shutting down event loop")
 			return nil

 		// processing timeout or partial TC event are top priority since
 		// they allow node to contribute to TC aggregation when replicas can't
 		// make progress on happy path
 		case <-timeoutChannel:
+			el.tracer.Trace("received timeout")
 			err = el.eventHandler.OnLocalTimeout()
 			if err != nil {
 				return fmt.Errorf("could not process timeout: %w", err)
@@ -136,8 +140,9 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {
 			continue

 		case <-partialTCs:
+			el.tracer.Trace("received partial timeout")
 			err = el.eventHandler.OnPartialTimeoutCertificateCreated(
-				el.newestSubmittedPartialTc.NewestPartialTc(),
+				el.newestSubmittedPartialTimeoutCertificate.NewestPartialTimeoutCertificate(),
 			)
 			if err != nil {
 				return fmt.Errorf("could not process partial created TC event: %w", err)
@@ -153,6 +158,8 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {
 			continue

 		default:
+			el.tracer.Trace("non-priority event")
+
 			// fall through to non-priority events
 		}

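The default case above closes the first of the loop's two selects; together they form the two-stage priority-select idiom this loop uses to favor timeout events: the first select never blocks (it only drains high-priority channels if they are ready), and the second select blocks over all channels. A distilled, runnable version of the idiom:

package main

import "fmt"

// step handles exactly one event, preferring the priority channel
// whenever both kinds of events are pending.
func step(priority, normal <-chan string) string {
	// stage 1: non-blocking check of priority events only
	select {
	case ev := <-priority:
		return "priority: " + ev
	default:
		// fall through to the blocking select
	}
	// stage 2: block on everything
	select {
	case ev := <-priority:
		return "priority: " + ev
	case ev := <-normal:
		return "normal: " + ev
	}
}

func main() {
	priority := make(chan string, 1)
	normal := make(chan string, 1)
	priority <- "timeout"
	normal <- "proposal"
	fmt.Println(step(priority, normal)) // always "priority: timeout"
	fmt.Println(step(priority, normal)) // then "normal: proposal"
}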
@@ -161,10 +168,13 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {

 		// same as before
 		case <-shutdownSignaled:
+			el.tracer.Trace("shutting down event loop")
 			return nil

 		// same as before
 		case <-timeoutChannel:
+			el.tracer.Trace("received timeout")
+
 			err = el.eventHandler.OnLocalTimeout()
 			if err != nil {
 				return fmt.Errorf("could not process timeout: %w", err)
@@ -172,6 +182,8 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {

 		// if we have a new proposal, process it
 		case queuedItem := <-el.proposals:
+			el.tracer.Trace("received proposal")
+
 			proposal := queuedItem.proposal
 			err = el.eventHandler.OnReceiveProposal(proposal)
 			if err != nil {
@@ -186,6 +198,7 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {

 		// if we have a new QC, process it
 		case <-quorumCertificates:
+			el.tracer.Trace("received quorum certificate")
 			err = el.eventHandler.OnReceiveQuorumCertificate(
 				*el.newestSubmittedQc.NewestQC(),
 			)
@@ -195,16 +208,18 @@ func (el *EventLoop[StateT, VoteT]) loop(ctx context.Context) error {

 		// if we have a new TC, process it
 		case <-timeoutCertificates:
+			el.tracer.Trace("received timeout certificate")
 			err = el.eventHandler.OnReceiveTimeoutCertificate(
-				*el.newestSubmittedTc.NewestTC(),
+				*el.newestSubmittedTimeoutCertificate.NewestTC(),
 			)
 			if err != nil {
 				return fmt.Errorf("could not process TC: %w", err)
 			}

 		case <-partialTCs:
+			el.tracer.Trace("received partial timeout certificate")
 			err = el.eventHandler.OnPartialTimeoutCertificateCreated(
-				el.newestSubmittedPartialTc.NewestPartialTc(),
+				el.newestSubmittedPartialTimeoutCertificate.NewestPartialTimeoutCertificate(),
 			)
 			if err != nil {
 				return fmt.Errorf("could no process partial created TC event: %w", err)
@@ -239,7 +254,7 @@ func (el *EventLoop[StateT, VoteT]) onTrustedQC(qc *models.QuorumCertificate) {

 // onTrustedTC pushes the received TC (which MUST be validated) to the
 // timeoutCertificates channel
 func (el *EventLoop[StateT, VoteT]) onTrustedTC(tc *models.TimeoutCertificate) {
-	if el.newestSubmittedTc.Track(tc) {
+	if el.newestSubmittedTimeoutCertificate.Track(tc) {
 		el.tcSubmittedNotifier <- struct{}{}
 	} else {
 		qc := (*tc).GetLatestQuorumCert()
@@ -257,8 +272,8 @@ func (el *EventLoop[StateT, VoteT]) OnTimeoutCertificateConstructedFromTimeouts(
 	el.onTrustedTC(&tc)
 }

-// OnPartialTimeoutCertificateCreated created a consensus.PartialTcCreated
-// payload and pushes it into partialTcCreated buffered channel for further
+// OnPartialTimeoutCertificateCreated created a consensus.PartialTimeoutCertificateCreated
+// payload and pushes it into partialTimeoutCertificateCreated buffered channel for further
 // processing by EventHandler. Since we use buffered channel this function can
 // block if buffer is full.
 func (el *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated(
@@ -271,8 +286,8 @@ func (el *EventLoop[StateT, VoteT]) OnPartialTimeoutCertificateCreated(
 		NewestQuorumCertificate:     newestQC,
 		PriorRankTimeoutCertificate: previousRankTimeoutCert,
 	}
-	if el.newestSubmittedPartialTc.Track(event) {
-		el.partialTcCreatedNotifier <- struct{}{}
+	if el.newestSubmittedPartialTimeoutCertificate.Track(event) {
+		el.partialTimeoutCertificateCreatedNotifier <- struct{}{}
 	}
 }

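All of the notifier paths above share one pattern: a tracker keeps only the newest certificate, and a channel with buffer size 1 coalesces any number of submissions into at most one pending wake-up; the loop then reads the current value from the tracker rather than from the channel. A standalone sketch of the pattern (this version uses a non-blocking send for brevity; the EventLoop above sends blockingly into its 1-buffered channel, which is why its comment warns the call can block):

package main

import (
	"fmt"
	"sync"
)

type newestTracker struct {
	mu     sync.Mutex
	rank   uint64
	newest string
}

// Track returns true only when the candidate supersedes the tracked value.
func (t *newestTracker) Track(rank uint64, v string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if rank <= t.rank && t.newest != "" {
		return false
	}
	t.rank, t.newest = rank, v
	return true
}

func (t *newestTracker) Newest() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.newest
}

func main() {
	tracker := &newestTracker{}
	notifier := make(chan struct{}, 1)

	submit := func(rank uint64, v string) {
		if tracker.Track(rank, v) {
			select {
			case notifier <- struct{}{}: // wake the loop
			default: // a wake-up is already pending; coalesce
			}
		}
	}

	submit(1, "tc-1")
	submit(3, "tc-3")
	submit(2, "tc-2") // stale, ignored by the tracker

	<-notifier                    // one wake-up covers all three submissions
	fmt.Println(tracker.Newest()) // tc-3
}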
250  consensus/eventloop/event_loop_test.go  (new file)

@@ -0,0 +1,250 @@
package eventloop

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"go.uber.org/atomic"

	"source.quilibrium.com/quilibrium/monorepo/consensus"
	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// TestEventLoop performs unit testing of event loop, checks if submitted events are propagated
// to event handler as well as handling of timeouts.
func TestEventLoop(t *testing.T) {
	suite.Run(t, new(EventLoopTestSuite))
}

type EventLoopTestSuite struct {
	suite.Suite

	eh     *mocks.EventHandler[*helper.TestState, *helper.TestVote]
	cancel context.CancelFunc

	eventLoop *EventLoop[*helper.TestState, *helper.TestVote]
}

func (s *EventLoopTestSuite) SetupTest() {
	s.eh = mocks.NewEventHandler[*helper.TestState, *helper.TestVote](s.T())
	s.eh.On("Start", mock.Anything).Return(nil).Maybe()
	s.eh.On("TimeoutChannel").Return(make(<-chan time.Time, 1)).Maybe()
	s.eh.On("OnLocalTimeout").Return(nil).Maybe()

	eventLoop, err := NewEventLoop(helper.Logger(), s.eh, time.Time{})
	require.NoError(s.T(), err)
	s.eventLoop = eventLoop

	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel
	signalerCtx := ctx

	s.eventLoop.Start(signalerCtx)
}

func (s *EventLoopTestSuite) TearDownTest() {
	s.cancel()
}

// TestReadyDone tests if event loop stops internal worker thread
func (s *EventLoopTestSuite) TestReadyDone() {
	time.Sleep(1 * time.Second)
	go func() {
		s.cancel()
	}()
}

// Test_SubmitProposal tests that a submitted proposal is eventually sent to the event handler for processing
func (s *EventLoopTestSuite) Test_SubmitProposal() {
	proposal := helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]()
	processed := atomic.NewBool(false)
	s.eh.On("OnReceiveProposal", proposal).Run(func(args mock.Arguments) {
		processed.Store(true)
	}).Return(nil).Once()
	s.eventLoop.SubmitProposal(proposal)
	require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}

// Test_SubmitQC tests that submitted QC is eventually sent to `EventHandler.OnReceiveQuorumCertificate` for processing
func (s *EventLoopTestSuite) Test_SubmitQC() {
	// qcIngestionFunction is the archetype for EventLoop.OnQuorumCertificateConstructedFromVotes and EventLoop.OnNewQuorumCertificateDiscovered
	type qcIngestionFunction func(models.QuorumCertificate)

	testQCIngestionFunction := func(f qcIngestionFunction, qcRank uint64) {
		qc := helper.MakeQC(helper.WithQCRank(qcRank))
		processed := atomic.NewBool(false)
		s.eh.On("OnReceiveQuorumCertificate", qc).Run(func(args mock.Arguments) {
			processed.Store(true)
		}).Return(nil).Once()
		f(qc)
		require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
	}

	s.Run("QCs handed to EventLoop.OnQuorumCertificateConstructedFromVotes are forwarded to EventHandler", func() {
		testQCIngestionFunction(s.eventLoop.OnQuorumCertificateConstructedFromVotes, 100)
	})

	s.Run("QCs handed to EventLoop.OnNewQuorumCertificateDiscovered are forwarded to EventHandler", func() {
		testQCIngestionFunction(s.eventLoop.OnNewQuorumCertificateDiscovered, 101)
	})
}

// Test_SubmitTC tests that submitted TC is eventually sent to `EventHandler.OnReceiveTimeoutCertificate` for processing
func (s *EventLoopTestSuite) Test_SubmitTC() {
	// tcIngestionFunction is the archetype for EventLoop.OnTimeoutCertificateConstructedFromTimeouts and EventLoop.OnNewTimeoutCertificateDiscovered
	type tcIngestionFunction func(models.TimeoutCertificate)

	testTCIngestionFunction := func(f tcIngestionFunction, tcRank uint64) {
		tc := helper.MakeTC(helper.WithTCRank(tcRank))
		processed := atomic.NewBool(false)
		s.eh.On("OnReceiveTimeoutCertificate", tc).Run(func(args mock.Arguments) {
			processed.Store(true)
		}).Return(nil).Once()
		f(tc)
		require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
	}

	s.Run("TCs handed to EventLoop.OnTimeoutCertificateConstructedFromTimeouts are forwarded to EventHandler", func() {
		testTCIngestionFunction(s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts, 100)
	})

	s.Run("TCs handed to EventLoop.OnNewTimeoutCertificateDiscovered are forwarded to EventHandler", func() {
		testTCIngestionFunction(s.eventLoop.OnNewTimeoutCertificateDiscovered, 101)
	})
}

// Test_SubmitTC_IngestNewestQC tests that included QC in TC is eventually sent to `EventHandler.OnReceiveQuorumCertificate` for processing
func (s *EventLoopTestSuite) Test_SubmitTC_IngestNewestQC() {
	// tcIngestionFunction is the archetype for EventLoop.OnTimeoutCertificateConstructedFromTimeouts and EventLoop.OnNewTimeoutCertificateDiscovered
	type tcIngestionFunction func(models.TimeoutCertificate)

	testTCIngestionFunction := func(f tcIngestionFunction, tcRank, qcRank uint64) {
		tc := helper.MakeTC(helper.WithTCRank(tcRank),
			helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(qcRank))))
		processed := atomic.NewBool(false)
		s.eh.On("OnReceiveQuorumCertificate", tc.GetLatestQuorumCert()).Run(func(args mock.Arguments) {
			processed.Store(true)
		}).Return(nil).Once()
		f(tc)
		require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
	}

	// process initial TC, this will track the newest TC
	s.eh.On("OnReceiveTimeoutCertificate", mock.Anything).Return(nil).Once()
	s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts(helper.MakeTC(
		helper.WithTCRank(100),
		helper.WithTCNewestQC(
			helper.MakeQC(
				helper.WithQCRank(80),
			),
		),
	))

	s.Run("QCs handed to EventLoop.OnTimeoutCertificateConstructedFromTimeouts are forwarded to EventHandler", func() {
		testTCIngestionFunction(s.eventLoop.OnTimeoutCertificateConstructedFromTimeouts, 100, 99)
	})

	s.Run("QCs handed to EventLoop.OnNewTimeoutCertificateDiscovered are forwarded to EventHandler", func() {
		testTCIngestionFunction(s.eventLoop.OnNewTimeoutCertificateDiscovered, 100, 100)
	})
}

// Test_OnPartialTimeoutCertificateCreated tests that event loop delivers partialTimeoutCertificateCreated events to event handler.
func (s *EventLoopTestSuite) Test_OnPartialTimeoutCertificateCreated() {
	rank := uint64(1000)
	newestQC := helper.MakeQC(helper.WithQCRank(rank - 10))
	previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(rank-1), helper.WithTCNewestQC(newestQC))

	processed := atomic.NewBool(false)
	partialTimeoutCertificateCreated := &consensus.PartialTimeoutCertificateCreated{
		Rank:                        rank,
		NewestQuorumCertificate:     newestQC,
		PriorRankTimeoutCertificate: previousRankTimeoutCert,
	}
	s.eh.On("OnPartialTimeoutCertificateCreated", partialTimeoutCertificateCreated).Run(func(args mock.Arguments) {
		processed.Store(true)
	}).Return(nil).Once()
	s.eventLoop.OnPartialTimeoutCertificateCreated(rank, newestQC, previousRankTimeoutCert)
	require.Eventually(s.T(), processed.Load, time.Millisecond*100, time.Millisecond*10)
}

// TestEventLoop_Timeout tests that event loop delivers timeout events to event handler under pressure
func TestEventLoop_Timeout(t *testing.T) {
	eh := &mocks.EventHandler[*helper.TestState, *helper.TestVote]{}
	processed := atomic.NewBool(false)
	eh.On("Start", mock.Anything).Return(nil).Once()
	eh.On("OnReceiveQuorumCertificate", mock.Anything).Return(nil).Maybe()
	eh.On("OnReceiveProposal", mock.Anything).Return(nil).Maybe()
	eh.On("OnLocalTimeout").Run(func(args mock.Arguments) {
		processed.Store(true)
	}).Return(nil).Once()

	eventLoop, err := NewEventLoop(helper.Logger(), eh, time.Time{})
	require.NoError(t, err)

	eh.On("TimeoutChannel").Return(time.After(100 * time.Millisecond))

	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := ctx
	eventLoop.Start(signalerCtx)

	time.Sleep(10 * time.Millisecond)

	var wg sync.WaitGroup
	wg.Add(2)

	// spam with proposals and QCs
	go func() {
		defer wg.Done()
		for !processed.Load() {
			qc := helper.MakeQC()
			eventLoop.OnQuorumCertificateConstructedFromVotes(qc)
		}
	}()

	go func() {
		defer wg.Done()
		for !processed.Load() {
			eventLoop.SubmitProposal(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]())
		}
	}()

	require.Eventually(t, processed.Load, time.Millisecond*200, time.Millisecond*10)

	cancel()
}

// TestReadyDoneWithStartTime tests that event loop correctly starts and schedules start of processing
// when startTime argument is used
func TestReadyDoneWithStartTime(t *testing.T) {
	eh := &mocks.EventHandler[*helper.TestState, *helper.TestVote]{}
	eh.On("Start", mock.Anything).Return(nil)
	eh.On("TimeoutChannel").Return(make(<-chan time.Time, 1))
	eh.On("OnLocalTimeout").Return(nil)

	startTimeDuration := 2 * time.Second
	startTime := time.Now().Add(startTimeDuration)
	eventLoop, err := NewEventLoop(helper.Logger(), eh, startTime)
	require.NoError(t, err)

	done := make(chan struct{})
	eh.On("OnReceiveProposal", mock.AnythingOfType("*models.SignedProposal")).Run(func(args mock.Arguments) {
		require.True(t, time.Now().After(startTime))
		close(done)
	}).Return(nil).Once()

	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := ctx
	eventLoop.Start(signalerCtx)

	eventLoop.SubmitProposal(helper.MakeSignedProposal[*helper.TestState, *helper.TestVote]())

	cancel()
}

File diff suppressed because it is too large
@@ -229,8 +229,8 @@ func (f *LevelledForest) registerWithParent(vertexContainer *vertexContainer) {
 		return
 	}

-	_, parentView := vertexContainer.vertex.Parent()
-	if parentView < f.LowestLevel {
+	_, parentRank := vertexContainer.vertex.Parent()
+	if parentRank < f.LowestLevel {
 		return
 	}
 	parentContainer := f.getOrCreateVertexContainer(

@@ -20,7 +20,7 @@ type Vertex interface {
 func VertexToString(v Vertex) string {
 	parentID, parentLevel := v.Parent()
 	return fmt.Sprintf(
-		"<id=%x level=%d parent_id=%d parent_level=%d>",
+		"<id=%x level=%d parent_id=%s parent_level=%d>",
 		v.VertexID(),
 		v.Level(),
 		parentID,

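The verb fix matters because parent IDs are not integers: %d applied to a byte slice or string produces noise rather than the identifier. For illustration (the concrete type of parentID in the repository is not shown here; the values below are placeholders):

package main

import "fmt"

func main() {
	id := []byte{0xde, 0xad}
	fmt.Printf("<id=%x>\n", id)           // <id=dead>        — hex-encoded bytes
	fmt.Printf("<id=%d>\n", id)           // <id=[222 173]>   — not what we want
	fmt.Printf("<parent_id=%s>\n", "abc") // <parent_id=abc>
	fmt.Printf("<parent_id=%d>\n", "abc") // <parent_id=%!d(string=abc)>
}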
@@ -354,7 +354,7 @@ func (f *Forks[StateT, VoteT]) AddValidatedState(
 			err,
 		)
 	}
-	err = f.checkForAdvancingFinalization(&certifiedParent)
+	err = f.checkForAdvancingFinalization(certifiedParent)
 	if err != nil {
 		return fmt.Errorf("updating finalization failed: %w", err)
 	}
@@ -650,39 +650,3 @@ func (f *Forks[StateT, VoteT]) collectStatesForFinalization(

 	return statesToBeFinalized, nil
 }
-
-// Type used to satisfy generic arguments in compiler time type assertion check
-type nilUnique struct{}
-
-// GetSignature implements models.Unique.
-func (n *nilUnique) GetSignature() []byte {
-	panic("unimplemented")
-}
-
-// GetTimestamp implements models.Unique.
-func (n *nilUnique) GetTimestamp() uint64 {
-	panic("unimplemented")
-}
-
-// Source implements models.Unique.
-func (n *nilUnique) Source() models.Identity {
-	panic("unimplemented")
-}
-
-// Clone implements models.Unique.
-func (n *nilUnique) Clone() models.Unique {
-	panic("unimplemented")
-}
-
-// GetRank implements models.Unique.
-func (n *nilUnique) GetRank() uint64 {
-	panic("unimplemented")
-}
-
-// Identity implements models.Unique.
-func (n *nilUnique) Identity() models.Identity {
-	panic("unimplemented")
-}
-
-var _ models.Unique = (*nilUnique)(nil)

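The removed block ends with the compile-time conformance assertion `var _ models.Unique = (*nilUnique)(nil)`, the same idiom used above for EventLoop. It converts a typed nil pointer to the interface, so the build fails the moment the type stops satisfying the interface, at zero runtime cost. A minimal illustration with a hypothetical interface:

package main

import "fmt"

type Unique interface {
	GetRank() uint64
}

type myState struct{ rank uint64 }

func (s *myState) GetRank() uint64 { return s.rank }

// Compile-time proof that *myState satisfies Unique: if a method is
// removed or its signature drifts, this line stops compiling.
var _ Unique = (*myState)(nil)

func main() {
	fmt.Println((&myState{rank: 7}).GetRank()) // 7
}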
950  consensus/forks/forks_test.go  (new file)

@@ -0,0 +1,950 @@
package forks

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"source.quilibrium.com/quilibrium/monorepo/consensus"
	"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
	"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
	"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

/*****************************************************************************
 * NOTATION:                                                                 *
 * A state is denoted as [◄(<qc_number>) <state_rank_number>].               *
 * For example, [◄(1) 2] means: a state of rank 2 that has a QC for rank 1.  *
 *****************************************************************************/

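Using this notation, the finalization rule these tests exercise is the 2-chain rule: a state P is finalized once there are states P ← C ← G where C's QC certifies P with adjacent ranks (a direct 1-chain) and G's QC certifies C. A small sketch of just that rule, checked against three of the scenarios below (an illustration of the tests' expectations, not the repository's Forks implementation):

package main

import "fmt"

// state mirrors the [◄(qc) rank] notation: QCRank is the rank the state's
// embedded QC certifies, Rank is the state's own rank.
type state struct {
	QCRank uint64
	Rank   uint64
}

// finalizes reports whether ingesting grandchild g finalizes p in the chain
// p <- c <- g: c must certify p via a DIRECT 1-chain (adjacent ranks), and
// g's QC must certify c (the second link may be indirect).
func finalizes(p, c, g state) bool {
	directFirstLink := c.QCRank == p.Rank && c.Rank == p.Rank+1
	secondLink := g.QCRank == c.Rank
	return directFirstLink && secondLink
}

func main() {
	// [◄(1) 2] [◄(2) 3] [◄(3) 4]: direct 2-chain — finalizes [◄(1) 2]
	fmt.Println(finalizes(state{1, 2}, state{2, 3}, state{3, 4})) // true

	// [◄(1) 3] [◄(3) 5] [◄(5) 7]: first link indirect — nothing finalized
	fmt.Println(finalizes(state{1, 3}, state{3, 5}, state{5, 7})) // false

	// [◄(1) 2] [◄(2) 3] [◄(3) 5]: indirect second link still finalizes [◄(1) 2]
	fmt.Println(finalizes(state{1, 2}, state{2, 3}, state{3, 5})) // true
}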
// TestInitialization verifies that at initialization, Forks reports:
// - the root / genesis state as finalized
// - it has no finalization proof for the root / genesis state (state and its finalization is trusted)
func TestInitialization(t *testing.T) {
	forks, _ := newForks(t)
	requireOnlyGenesisStateFinalized(t, forks)
	_, hasProof := forks.FinalityProof()
	require.False(t, hasProof)
}

// TestFinalize_Direct1Chain tests adding a direct 1-chain on top of the genesis state:
// - receives [◄(1) 2] [◄(2) 3]
//
// Expected behaviour:
// - On the one hand, Forks should not finalize any _additional_ states, because there is
//   no finalizable 2-chain for [◄(1) 2]. Hence, no finalization events should be emitted.
// - On the other hand, after adding the two states, Forks has enough knowledge to construct
//   a FinalityProof for the genesis state.
func TestFinalize_Direct1Chain(t *testing.T) {
	builder := NewStateBuilder().
		Add(1, 2).
		Add(2, 3)
	states, err := builder.States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)

		// adding state [◄(1) 2] should not finalize anything
		// as the genesis state is trusted, there should be no FinalityProof available for it
		require.NoError(t, forks.AddValidatedState(states[0]))
		requireOnlyGenesisStateFinalized(t, forks)
		_, hasProof := forks.FinalityProof()
		require.False(t, hasProof)

		// After adding state [◄(2) 3], Forks has enough knowledge to construct a FinalityProof for the
		// genesis state. However, finalization remains at the genesis state, so no events should be emitted.
		expectedFinalityProof := makeFinalityProof(t, builder.GenesisState().State, states[0], states[1].ParentQuorumCertificate)
		require.NoError(t, forks.AddValidatedState(states[1]))
		requireLatestFinalizedState(t, forks, builder.GenesisState().State)
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)

		// After adding CertifiedState [◄(1) 2] ◄(2), Forks has enough knowledge to construct a FinalityProof for
		// the genesis state. However, finalization remains at the genesis state, so no events should be emitted.
		expectedFinalityProof := makeFinalityProof(t, builder.GenesisState().State, states[0], states[1].ParentQuorumCertificate)
		c, err := models.NewCertifiedState(states[0], states[1].ParentQuorumCertificate)
		require.NoError(t, err)

		require.NoError(t, forks.AddCertifiedState(c))
		requireLatestFinalizedState(t, forks, builder.GenesisState().State)
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestFinalize_Direct2Chain tests adding a direct 1-chain on a direct 1-chain (direct 2-chain).
// - receives [◄(1) 2] [◄(2) 3] [◄(3) 4]
// - Forks should finalize [◄(1) 2]
func TestFinalize_Direct2Chain(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(3, 4).
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestFinalize_DirectIndirect2Chain tests adding an indirect 1-chain on a direct 1-chain.
// receives [◄(1) 2] [◄(2) 3] [◄(3) 5]
// it should finalize [◄(1) 2]
func TestFinalize_DirectIndirect2Chain(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(3, 5).
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestFinalize_IndirectDirect2Chain tests adding a direct 1-chain on an indirect 1-chain.
// - Forks receives [◄(1) 3] [◄(3) 5] [◄(5) 7]
// - it should not finalize any states because there is no finalizable 2-chain.
func TestFinalize_IndirectDirect2Chain(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 3).
		Add(3, 5).
		Add(5, 7).
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireOnlyGenesisStateFinalized(t, forks)
		_, hasProof := forks.FinalityProof()
		require.False(t, hasProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireOnlyGenesisStateFinalized(t, forks)
		_, hasProof := forks.FinalityProof()
		require.False(t, hasProof)
	})
}

// TestFinalize_Direct2ChainOnIndirect tests adding a direct 2-chain on an indirect 2-chain:
// - ingesting [◄(1) 3] [◄(3) 5] [◄(5) 6] [◄(6) 7] [◄(7) 8]
// - should result in finalization of [◄(5) 6]
func TestFinalize_Direct2ChainOnIndirect(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 3).
		Add(3, 5).
		Add(5, 6).
		Add(6, 7).
		Add(7, 8).
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[2])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[2])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestFinalize_Direct2ChainOnDirect tests adding a sequence of direct 2-chains:
// - ingesting [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5] [◄(5) 6]
// - should result in finalization of [◄(3) 4]
func TestFinalize_Direct2ChainOnDirect(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(3, 4).
		Add(4, 5).
		Add(5, 6).
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[2])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[2])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestFinalize_Multiple2Chains tests the case where a state can be finalized by different 2-chains.
// - ingesting [◄(1) 2] [◄(2) 3] [◄(3) 5] [◄(3) 6] [◄(3) 7]
// - should result in finalization of [◄(1) 2]
func TestFinalize_Multiple2Chains(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(3, 5).
		Add(3, 6).
		Add(3, 7).
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestFinalize_OrphanedFork tests that we can finalize a state which causes a conflicting fork to be orphaned.
// We ingest the following state tree:
//
//	[◄(1) 2] [◄(2) 3]
//	         [◄(2) 4] [◄(4) 5] [◄(5) 6]
//
// which should result in finalization of [◄(2) 4] and pruning of [◄(2) 3]
func TestFinalize_OrphanedFork(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2). // [◄(1) 2]
		Add(2, 3). // [◄(2) 3], should eventually be pruned
		Add(2, 4). // [◄(2) 4], should eventually be finalized
		Add(4, 5). // [◄(4) 5]
		Add(5, 6). // [◄(5) 6]
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[2], states[3], states[4].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		require.False(t, forks.IsKnownState(states[1].Identifier))
		requireLatestFinalizedState(t, forks, states[2])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		require.False(t, forks.IsKnownState(states[1].Identifier))
		requireLatestFinalizedState(t, forks, states[2])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestDuplication tests that delivering the same state/qc multiple times has
// the same end state as delivering the state/qc once.
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(2) 3] [◄(3) 4] [◄(3) 4] [◄(4) 5] [◄(4) 5]
// - it should finalize [◄(2) 3]
func TestDuplication(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(2, 3).
		Add(3, 4).
		Add(3, 4).
		Add(4, 5).
		Add(4, 5).
		States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[1], states[3], states[5].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[1])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states))

		requireLatestFinalizedState(t, forks, states[1])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestIgnoreStatesBelowFinalizedRank tests that states below finalized rank are ignored.
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(1) 5]
// - it should finalize [◄(1) 2]
func TestIgnoreStatesBelowFinalizedRank(t *testing.T) {
	builder := NewStateBuilder().
		Add(1, 2). // [◄(1) 2]
		Add(2, 3). // [◄(2) 3]
		Add(3, 4). // [◄(3) 4]
		Add(1, 5)  // [◄(1) 5]
	states, err := builder.States()
	require.NoError(t, err)
	expectedFinalityProof := makeFinalityProof(t, states[0], states[1], states[2].ParentQuorumCertificate)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		// initialize forks and add first 3 states:
		//  * state [◄(1) 2] should then be finalized
		//  * and state [1] should be pruned
		forks, _ := newForks(t)
		require.Nil(t, addValidatedStateToForks(forks, states[:3]))

		// sanity checks to confirm correct test setup
		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
		require.False(t, forks.IsKnownState(builder.GenesisState().Identifier()))

		// adding state [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent
		//  * Forks should store state, despite the parent already being pruned
		//  * finalization should not change
		orphanedState := states[3]
		require.Nil(t, forks.AddValidatedState(orphanedState))
		require.True(t, forks.IsKnownState(orphanedState.Identifier))
		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		// initialize forks and add first 3 states:
		//  * state [◄(1) 2] should then be finalized
		//  * and state [1] should be pruned
		forks, _ := newForks(t)
		require.Nil(t, addCertifiedStatesToForks(forks, states[:3]))
		// sanity checks to confirm correct test setup
		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
		require.False(t, forks.IsKnownState(builder.GenesisState().Identifier()))

		// adding state [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent
		//  * Forks should store state, despite the parent already being pruned
		//  * finalization should not change
		certStateWithUnknownParent := toCertifiedState(t, states[3])
		require.Nil(t, forks.AddCertifiedState(certStateWithUnknownParent))
		require.True(t, forks.IsKnownState(certStateWithUnknownParent.State.Identifier))
		requireLatestFinalizedState(t, forks, states[0])
		requireFinalityProof(t, forks, expectedFinalityProof)
	})
}

// TestDoubleProposal tests that the DoubleProposal notification is emitted when two different
// states for the same rank are added. We ingest the following state tree:
//
//	    / [◄(1) 2]
//	[1]
//	    \ [◄(1) 2']
//
// which should result in a DoubleProposal event referencing the states [◄(1) 2] and [◄(1) 2']
func TestDoubleProposal(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).                // [◄(1) 2]
		AddVersioned(1, 2, 0, 1). // [◄(1) 2']
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, notifier := newForks(t)
		notifier.On("OnDoubleProposeDetected", states[1], states[0]).Once()

		err = addValidatedStateToForks(forks, states)
		require.NoError(t, err)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, notifier := newForks(t)
		notifier.On("OnDoubleProposeDetected", states[1], states[0]).Once()

		err = forks.AddCertifiedState(toCertifiedState(t, states[0])) // add [◄(1) 2] as certified state
		require.NoError(t, err)
		err = forks.AddCertifiedState(toCertifiedState(t, states[1])) // add [◄(1) 2'] as certified state
		require.NoError(t, err)
	})
}

// TestConflictingQCs checks that adding 2 conflicting QCs should return models.ByzantineThresholdExceededError
// We ingest the following state tree:
//
//	[◄(1) 2] [◄(2) 3]  [◄(3) 4]  [◄(4) 6]
//	         [◄(2) 3'] [◄(3') 5]
//
// which should result in a `ByzantineThresholdExceededError`, because conflicting states 3 and 3' both have QCs
func TestConflictingQCs(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).                // [◄(1) 2]
		Add(2, 3).                // [◄(2) 3]
		AddVersioned(2, 3, 0, 1). // [◄(2) 3']
		Add(3, 4).                // [◄(3) 4]
		Add(4, 6).                // [◄(4) 6]
		AddVersioned(3, 5, 1, 0). // [◄(3') 5]
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, notifier := newForks(t)
		notifier.On("OnDoubleProposeDetected", states[2], states[1]).Return(nil)

		err = addValidatedStateToForks(forks, states)
		assert.True(t, models.IsByzantineThresholdExceededError(err))
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, notifier := newForks(t)
		notifier.On("OnDoubleProposeDetected", states[2], states[1]).Return(nil)

		// As [◄(3') 5] is not certified, it will not be added to Forks. However, its QC ◄(3') is
		// delivered to Forks as part of the *certified* state [◄(2) 3'].
		err = addCertifiedStatesToForks(forks, states)
		assert.True(t, models.IsByzantineThresholdExceededError(err))
	})
}

// TestConflictingFinalizedForks checks that finalizing 2 conflicting forks should return models.ByzantineThresholdExceededError
// We ingest the following state tree:
//
//	[◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5]
//	         [◄(2) 6] [◄(6) 7] [◄(7) 8]
//
// Here, both states [◄(2) 3] and [◄(2) 6] satisfy the finalization condition, i.e. we have a fork
// in the finalized states, which should result in a models.ByzantineThresholdExceededError exception.
func TestConflictingFinalizedForks(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(3, 4).
		Add(4, 5). // finalizes [◄(2) 3]
		Add(2, 6).
		Add(6, 7).
		Add(7, 8). // finalizes [◄(2) 6], conflicting with [◄(2) 3]
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		err = addValidatedStateToForks(forks, states)
		assert.True(t, models.IsByzantineThresholdExceededError(err))
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		err = addCertifiedStatesToForks(forks, states)
		assert.True(t, models.IsByzantineThresholdExceededError(err))
	})
}

// TestAddDisconnectedState checks that adding a state which does not connect to the
// latest finalized state returns a `models.MissingStateError`
// - receives [◄(2) 3]
// - should return `models.MissingStateError`, because the parent is above the pruning
//   threshold, but Forks does not know its parent
func TestAddDisconnectedState(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2). // we will skip this state [◄(1) 2]
		Add(2, 3). // [◄(2) 3]
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, _ := newForks(t)
		err := forks.AddValidatedState(states[1])
		require.Error(t, err)
		assert.True(t, models.IsMissingStateError(err))
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, _ := newForks(t)
		err := forks.AddCertifiedState(toCertifiedState(t, states[1]))
		require.Error(t, err)
		assert.True(t, models.IsMissingStateError(err))
	})
}

// TestGetState tests that we can retrieve stored states. Here, we test that
// attempting to retrieve nonexistent or pruned states fails without causing an exception.
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4], then [◄(4) 5]
// - should finalize [◄(1) 2], then [◄(2) 3]
func TestGetState(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2). // [◄(1) 2]
		Add(2, 3). // [◄(2) 3]
		Add(3, 4). // [◄(3) 4]
		Add(4, 5). // [◄(4) 5]
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		statesAddedFirst := states[:3] // [◄(1) 2] [◄(2) 3] [◄(3) 4]
		remainingState := states[3]    // [◄(4) 5]
		forks, _ := newForks(t)

		// should be unable to retrieve a state before it is added
		_, ok := forks.GetState(states[0].Identifier)
		assert.False(t, ok)

		// add first 3 states - should finalize [◄(1) 2]
		err = addValidatedStateToForks(forks, statesAddedFirst)
		require.NoError(t, err)

		// should be able to retrieve all stored states
		for _, state := range statesAddedFirst {
			b, ok := forks.GetState(state.Identifier)
			assert.True(t, ok)
			assert.Equal(t, state, b)
		}

		// add remaining state [◄(4) 5] - should finalize [◄(2) 3] and prune [◄(1) 2]
		require.Nil(t, forks.AddValidatedState(remainingState))

		// should be able to retrieve just added state
		b, ok := forks.GetState(remainingState.Identifier)
		assert.True(t, ok)
		assert.Equal(t, remainingState, b)

		// should be unable to retrieve pruned state
		_, ok = forks.GetState(statesAddedFirst[0].Identifier)
		assert.False(t, ok)
	})

	// Caution: finalization is driven by QCs. Therefore, we include the QC for state 3
	// in the first batch of states that we add. This is analogous to previous test case,
	// except that we are delivering the QC ◄(3) as part of the certified state of rank 2
	//   [◄(2) 3] ◄(3)
	// while in the previous sub-test, the QC ◄(3) was delivered as part of state [◄(3) 4]
	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		statesAddedFirst := toCertifiedStates(t, states[:2]...) // [◄(1) 2] [◄(2) 3] ◄(3)
		remainingState := toCertifiedState(t, states[2])        // [◄(3) 4] ◄(4)
		forks, _ := newForks(t)

		// should be unable to retrieve a state before it is added
		_, ok := forks.GetState(states[0].Identifier)
		assert.False(t, ok)

		// add first states - should finalize [◄(1) 2]
		err := forks.AddCertifiedState(statesAddedFirst[0])
		require.NoError(t, err)
		err = forks.AddCertifiedState(statesAddedFirst[1])
		require.NoError(t, err)

		// should be able to retrieve all stored states
		for _, state := range statesAddedFirst {
			b, ok := forks.GetState(state.State.Identifier)
			assert.True(t, ok)
			assert.Equal(t, state.State, b)
		}

		// add remaining state [◄(3) 4] ◄(4) - should finalize [◄(2) 3] and prune [◄(1) 2]
		require.Nil(t, forks.AddCertifiedState(remainingState))

		// should be able to retrieve just added state
		b, ok := forks.GetState(remainingState.State.Identifier)
		assert.True(t, ok)
		assert.Equal(t, remainingState.State, b)

		// should be unable to retrieve pruned state
		_, ok = forks.GetState(statesAddedFirst[0].State.Identifier)
		assert.False(t, ok)
	})
}

// TestGetStatesForRank tests retrieving states for a rank (also including double proposals).
// - Forks receives [◄(1) 2] [◄(2) 4] [◄(2) 4'],
//   where [◄(2) 4'] is a double proposal, because it has the same rank as [◄(2) 4]
//
// Expected behaviour:
// - Forks should store all the states
// - Forks should emit a `OnDoubleProposeDetected` notification
// - we can retrieve all states, including the double proposals
func TestGetStatesForRank(t *testing.T) {
	states, err := NewStateBuilder().
		Add(1, 2).                // [◄(1) 2]
		Add(2, 4).                // [◄(2) 4]
		AddVersioned(2, 4, 0, 1). // [◄(2) 4']
		States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		forks, notifier := newForks(t)
		notifier.On("OnDoubleProposeDetected", states[2], states[1]).Once()

		err = addValidatedStateToForks(forks, states)
		require.NoError(t, err)

		// expect 1 state at rank 2
		storedStates := forks.GetStatesForRank(2)
		assert.Len(t, storedStates, 1)
		assert.Equal(t, states[0], storedStates[0])

		// expect 2 states at rank 4
		storedStates = forks.GetStatesForRank(4)
		assert.Len(t, storedStates, 2)
		assert.ElementsMatch(t, states[1:], storedStates)

		// expect 0 states at rank 3
		storedStates = forks.GetStatesForRank(3)
		assert.Len(t, storedStates, 0)
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		forks, notifier := newForks(t)
		notifier.On("OnDoubleProposeDetected", states[2], states[1]).Once()

		err := forks.AddCertifiedState(toCertifiedState(t, states[0]))
		require.NoError(t, err)
		err = forks.AddCertifiedState(toCertifiedState(t, states[1]))
		require.NoError(t, err)
		err = forks.AddCertifiedState(toCertifiedState(t, states[2]))
		require.NoError(t, err)

		// expect 1 state at rank 2
		storedStates := forks.GetStatesForRank(2)
		assert.Len(t, storedStates, 1)
		assert.Equal(t, states[0], storedStates[0])

		// expect 2 states at rank 4
		storedStates = forks.GetStatesForRank(4)
		assert.Len(t, storedStates, 2)
		assert.ElementsMatch(t, states[1:], storedStates)

		// expect 0 states at rank 3
		storedStates = forks.GetStatesForRank(3)
		assert.Len(t, storedStates, 0)
	})
}

// TestNotifications tests that Forks emits the expected events:
// - Forks receives [◄(1) 2] [◄(2) 3] [◄(3) 4]
//
// Expected Behaviour:
// - Each of the ingested states should result in an `OnStateIncorporated` notification
// - Forks should finalize [◄(1) 2], resulting in a `MakeFinal` event and an `OnFinalizedState` event
func TestNotifications(t *testing.T) {
	builder := NewStateBuilder().
		Add(1, 2).
		Add(2, 3).
		Add(3, 4)
	states, err := builder.States()
	require.NoError(t, err)

	t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
		notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
		// 4 states including the genesis are incorporated
		notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Times(4)
		notifier.On("OnFinalizedState", states[0]).Once()
		finalizationCallback := mocks.NewFinalizer(t)
		finalizationCallback.On("MakeFinal", states[0].Identifier).Return(nil).Once()

		forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
		require.NoError(t, err)
		require.NoError(t, addValidatedStateToForks(forks, states))
	})

	t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
		notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
		// 4 states including the genesis are incorporated
		notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Times(4)
		notifier.On("OnFinalizedState", states[0]).Once()
		finalizationCallback := mocks.NewFinalizer(t)
		finalizationCallback.On("MakeFinal", states[0].Identifier).Return(nil).Once()

		forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
		require.NoError(t, err)
		require.NoError(t, addCertifiedStatesToForks(forks, states))
	})
}

// TestFinalizingMultipleStates tests that `OnFinalizedState` notifications are emitted in the correct order
// when there are multiple states finalized by adding a _single_ state.
// - receiving [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] [◄(11) 12] should not finalize any states,
// because there is no 2-chain with the first chain link being a _direct_ 1-chain
// - adding [◄(12) 22] should finalize up to state [◄(7) 11]
//
// This test verifies the following expected properties:
// 1. Safety under reentrancy:
// While Forks is single-threaded, there is still the possibility of reentrancy. Specifically, the
// consumers of our finalization events are served by the goroutine executing Forks. It is conceivable
// that a consumer might access Forks and query the latest finalization proof. This would be legal if
// the component supplying the goroutine to Forks also consumes the notifications. Therefore, for API
// safety, we require Forks to _first update_ its `FinalityProof()` before it emits _any_ events.
// 2. For each finalized state, the `finalizationCallback` is executed _before_ the `OnFinalizedState` notification.
// 3. States are finalized in order of increasing rank (without skipping any states).
func TestFinalizingMultipleStates(t *testing.T) {
builder := NewStateBuilder().
Add(1, 3). // index 0: [◄(1) 3]
Add(3, 5). // index 1: [◄(3) 5]
Add(5, 7). // index 2: [◄(5) 7]
Add(7, 11). // index 3: [◄(7) 11] -- expected to be finalized
Add(11, 12). // index 4: [◄(11) 12]
Add(12, 22) // index 5: [◄(12) 22]
states, err := builder.States()
require.NoError(t, err)

// The finality proof should immediately point to the _latest_ finalized state. Subsequently emitting
// finalization events for lower states is fine, because notifications are only guaranteed to arrive
// _eventually_, i.e. consumers expect notifications / events to potentially lag behind.
expectedFinalityProof := makeFinalityProof(t, states[3], states[4], states[5].ParentQuorumCertificate)

setupForksAndAssertions := func() (*Forks[*helper.TestState, *helper.TestVote], *mocks.Finalizer, *mocks.Consumer[*helper.TestState, *helper.TestVote]) {
// initialize Forks with custom event consumers so we can check the order of emitted events
notifier := &mocks.Consumer[*helper.TestState, *helper.TestVote]{}
finalizationCallback := mocks.NewFinalizer(t)
notifier.On("OnStateIncorporated", mock.Anything).Return(nil)
forks, err := NewForks(builder.GenesisState(), finalizationCallback, notifier)
require.NoError(t, err)

// expecting finalization of [◄(1) 3] [◄(3) 5] [◄(5) 7] [◄(7) 11] in this order
statesAwaitingFinalization := toStateAwaitingFinalization(states[:4])

finalizationCallback.On("MakeFinal", mock.Anything).Run(func(args mock.Arguments) {
requireFinalityProof(t, forks, expectedFinalityProof) // Requirement 1: forks should _first update_ its `FinalityProof()` before it emits _any_ events

// Requirement 3: finalized in order of increasing rank (without skipping any states)
expectedNextFinalizationEvents := statesAwaitingFinalization[0]
require.Equal(t, expectedNextFinalizationEvents.State.Identifier, args[0])

// Requirement 2: for each finalized state, the `finalizationCallback` is executed _before_ the `OnFinalizedState` notification;
// no duplication of events is expected under normal operations
require.False(t, expectedNextFinalizationEvents.MakeFinalCalled)
require.False(t, expectedNextFinalizationEvents.OnFinalizedStateEmitted)
expectedNextFinalizationEvents.MakeFinalCalled = true
}).Return(nil).Times(4)

notifier.On("OnFinalizedState", mock.Anything).Run(func(args mock.Arguments) {
requireFinalityProof(t, forks, expectedFinalityProof) // Requirement 1: forks should _first update_ its `FinalityProof()` before it emits _any_ events

// Requirement 3: finalized in order of increasing rank (without skipping any states)
expectedNextFinalizationEvents := statesAwaitingFinalization[0]
require.Equal(t, expectedNextFinalizationEvents.State, args[0])

// Requirement 2: for each finalized state, the `finalizationCallback` is executed _before_ the `OnFinalizedState` notification;
// no duplication of events is expected under normal operations
require.True(t, expectedNextFinalizationEvents.MakeFinalCalled)
require.False(t, expectedNextFinalizationEvents.OnFinalizedStateEmitted)
expectedNextFinalizationEvents.OnFinalizedStateEmitted = true

// At this point, `MakeFinal` and `OnFinalizedState` have both been emitted for the state, so we are done with it
statesAwaitingFinalization = statesAwaitingFinalization[1:]
}).Times(4)

return forks, finalizationCallback, notifier
}

t.Run("consensus participant mode: ingest validated states", func(t *testing.T) {
|
||||
forks, finalizationCallback, notifier := setupForksAndAssertions()
|
||||
err = addValidatedStateToForks(forks, states[:5]) // adding [◄(1) 2] [◄(2) 4] [◄(4) 6] [◄(6) 11] [◄(11) 12]
|
||||
require.NoError(t, err)
|
||||
requireOnlyGenesisStateFinalized(t, forks) // finalization should still be at the genesis state
|
||||
|
||||
require.NoError(t, forks.AddValidatedState(states[5])) // adding [◄(12) 22] should trigger finalization events
|
||||
requireFinalityProof(t, forks, expectedFinalityProof)
|
||||
finalizationCallback.AssertExpectations(t)
|
||||
notifier.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("consensus follower mode: ingest certified states", func(t *testing.T) {
|
||||
forks, finalizationCallback, notifier := setupForksAndAssertions()
|
||||
// adding [◄(1) 2] [◄(2) 4] [◄(4) 6] [◄(6) 11] ◄(11)
|
||||
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[0])))
|
||||
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[1])))
|
||||
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[2])))
|
||||
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[3])))
|
||||
require.NoError(t, err)
|
||||
requireOnlyGenesisStateFinalized(t, forks) // finalization should still be at the genesis state
|
||||
|
||||
// adding certified state [◄(11) 12] ◄(12) should trigger finalization events
|
||||
require.NoError(t, forks.AddCertifiedState(toCertifiedState(t, states[4])))
|
||||
requireFinalityProof(t, forks, expectedFinalityProof)
|
||||
finalizationCallback.AssertExpectations(t)
|
||||
notifier.AssertExpectations(t)
|
||||
})
|
||||
}
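
// The snippet below is an illustrative sketch (hypothetical, not part of the
// original test suite): it shows the reentrancy pattern that Requirement 1
// protects against. A consumer served by Forks' own goroutine may legally
// query Forks from within a notification callback, so `FinalityProof()` must
// already be updated before any event fires. `forks` and `notifier` are
// assumed to be set up as in `setupForksAndAssertions` above.
//
//	notifier.On("OnFinalizedState", mock.Anything).Run(func(args mock.Arguments) {
//		proof, known := forks.FinalityProof() // re-enters Forks from inside the callback
//		require.True(t, known)
//		require.NotNil(t, proof)
//	})
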
/* ************************************* internal functions ************************************* */

func newForks(t *testing.T) (*Forks[*helper.TestState, *helper.TestVote], *mocks.Consumer[*helper.TestState, *helper.TestVote]) {
notifier := mocks.NewConsumer[*helper.TestState, *helper.TestVote](t)
notifier.On("OnStateIncorporated", mock.Anything).Return(nil).Maybe()
notifier.On("OnFinalizedState", mock.Anything).Maybe()
finalizationCallback := mocks.NewFinalizer(t)
finalizationCallback.On("MakeFinal", mock.Anything).Return(nil).Maybe()

genesisBQ := makeGenesis()

forks, err := NewForks(genesisBQ, finalizationCallback, notifier)

require.NoError(t, err)
return forks, notifier
}

// addValidatedStateToForks adds all the given states to Forks, in order.
// If any errors occur, returns the first one.
func addValidatedStateToForks(forks *Forks[*helper.TestState, *helper.TestVote], states []*models.State[*helper.TestState]) error {
for _, state := range states {
err := forks.AddValidatedState(state)
if err != nil {
return fmt.Errorf("test failed to add state for rank %d: %w", state.Rank, err)
}
}
return nil
}

// addCertifiedStatesToForks iterates over all states, caches them locally in a map,
// constructs certified states whenever possible, and adds the certified states to forks.
// Note: if `states` is a single fork, the _last state_ in the slice will not be added,
// because there is no QC for it.
// If any errors occur, returns the first one.
func addCertifiedStatesToForks(forks *Forks[*helper.TestState, *helper.TestVote], states []*models.State[*helper.TestState]) error {
uncertifiedStates := make(map[models.Identity]*models.State[*helper.TestState])
for _, b := range states {
uncertifiedStates[b.Identifier] = b
parentID := b.ParentQuorumCertificate.GetSelector()
parent, found := uncertifiedStates[parentID]
if !found {
continue
}
delete(uncertifiedStates, parentID)

certParent, err := models.NewCertifiedState(parent, b.ParentQuorumCertificate)
if err != nil {
return fmt.Errorf("test failed to create certified state for rank %d: %w", certParent.State.Rank, err)
}
err = forks.AddCertifiedState(certParent)
if err != nil {
return fmt.Errorf("test failed to add certified state for rank %d: %w", certParent.State.Rank, err)
}
}

return nil
}
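
// Illustrative sketch (hypothetical, not part of the original tests): for a
// single fork, the helper above certifies each state with its child's QC, so
// the last state in the slice stays uncertified and is never added.
//
//	states, _ := NewStateBuilder().Add(1, 2).Add(2, 3).Add(3, 4).States()
//	forks, _ := newForks(t)
//	_ = addCertifiedStatesToForks(forks, states) // adds states[0] and states[1]; states[2] has no QC yet
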
// requireLatestFinalizedState asserts that the latest finalized state matches the expected state and rank.
func requireLatestFinalizedState(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote], expectedFinalized *models.State[*helper.TestState]) {
require.Equal(t, expectedFinalized, forks.FinalizedState(), "finalized state is not as expected")
require.Equal(t, forks.FinalizedRank(), expectedFinalized.Rank, "FinalizedRank returned wrong value")
}

// requireOnlyGenesisStateFinalized asserts that no states have been finalized beyond the genesis state.
// It also verifies that no finality proof is known yet.
func requireOnlyGenesisStateFinalized(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote]) {
genesis := makeGenesis()
require.Equal(t, forks.FinalizedState(), genesis.State, "finalized state is not the genesis state")
require.Equal(t, forks.FinalizedState().Rank, genesis.State.Rank)
require.Equal(t, forks.FinalizedState().Rank, genesis.CertifyingQuorumCertificate.GetRank())
require.Equal(t, forks.FinalizedRank(), genesis.State.Rank, "FinalizedRank returned wrong value")

finalityProof, isKnown := forks.FinalityProof()
require.Nil(t, finalityProof, "expecting finality proof to be nil for genesis state at initialization")
require.False(t, isKnown, "no finality proof should be known for genesis state at initialization")
}

// requireFinalityProof asserts that forks' FinalityProof() matches the expected finality proof
// and that the finalized state and rank are consistent with it.
func requireFinalityProof(t *testing.T, forks *Forks[*helper.TestState, *helper.TestVote], expectedFinalityProof *consensus.FinalityProof[*helper.TestState]) {
finalityProof, isKnown := forks.FinalityProof()
require.True(t, isKnown)
require.Equal(t, expectedFinalityProof, finalityProof)
require.Equal(t, forks.FinalizedState(), expectedFinalityProof.State)
require.Equal(t, forks.FinalizedRank(), expectedFinalityProof.State.Rank)
}

// toCertifiedState generates a QC for the given state and returns the combination as a certified state
func toCertifiedState(t *testing.T, state *models.State[*helper.TestState]) *models.CertifiedState[*helper.TestState] {
qc := &helper.TestQuorumCertificate{
Rank: state.Rank,
Selector: state.Identifier,
}
cb, err := models.NewCertifiedState(state, qc)
require.NoError(t, err)
return cb
}

// toCertifiedStates generates a QC for each of the given states and returns the combinations as certified states
func toCertifiedStates(t *testing.T, states ...*models.State[*helper.TestState]) []*models.CertifiedState[*helper.TestState] {
certStates := make([]*models.CertifiedState[*helper.TestState], 0, len(states))
for _, b := range states {
certStates = append(certStates, toCertifiedState(t, b))
}
return certStates
}

func makeFinalityProof(t *testing.T, state *models.State[*helper.TestState], directChild *models.State[*helper.TestState], qcCertifyingChild models.QuorumCertificate) *consensus.FinalityProof[*helper.TestState] {
c, err := models.NewCertifiedState(directChild, qcCertifyingChild) // certified child of FinalizedState
require.NoError(t, err)
return &consensus.FinalityProof[*helper.TestState]{State: state, CertifiedChild: c}
}

// stateAwaitingFinalization is intended for tracking finalization events and their order for a specific state
type stateAwaitingFinalization struct {
State *models.State[*helper.TestState]
MakeFinalCalled bool // indicates whether `Finalizer.MakeFinal` was called
OnFinalizedStateEmitted bool // indicates whether the `OnFinalizedState` notification was emitted
}

// toStateAwaitingFinalization creates a `stateAwaitingFinalization` tracker for each input state
func toStateAwaitingFinalization(states []*models.State[*helper.TestState]) []*stateAwaitingFinalization {
trackers := make([]*stateAwaitingFinalization, 0, len(states))
for _, b := range states {
tracker := &stateAwaitingFinalization{b, false, false}
trackers = append(trackers, tracker)
}
return trackers
}
165
consensus/forks/state_builder_test.go
Normal file
@ -0,0 +1,165 @@
package forks

import (
"fmt"

"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// StateRank specifies the data to create a state
type StateRank struct {
// Rank is the rank of the state to be created
Rank uint64
// StateVersion is the version of the state for that rank.
// Useful for creating conflicting states at the same rank.
StateVersion int
// QCRank is the rank of the QC embedded in this state (also: the rank of the state's parent)
QCRank uint64
// QCVersion is the version of the QC for that rank.
QCVersion int
}

// QCIndex returns a unique identifier for the state's QC.
func (bv *StateRank) QCIndex() string {
return fmt.Sprintf("%v-%v", bv.QCRank, bv.QCVersion)
}

// StateIndex returns a unique identifier for the state.
func (bv *StateRank) StateIndex() string {
return fmt.Sprintf("%v-%v", bv.Rank, bv.StateVersion)
}

// StateBuilder is a test utility for creating state structure fixtures.
type StateBuilder struct {
stateRanks []*StateRank
}

func NewStateBuilder() *StateBuilder {
return &StateBuilder{
stateRanks: make([]*StateRank, 0),
}
}

// Add adds a state with the given qcRank and stateRank. Returns self-reference for chaining.
func (bb *StateBuilder) Add(qcRank uint64, stateRank uint64) *StateBuilder {
bb.stateRanks = append(bb.stateRanks, &StateRank{
Rank: stateRank,
QCRank: qcRank,
})
return bb
}

// GenesisState returns the genesis state, which is always finalized.
func (bb *StateBuilder) GenesisState() *models.CertifiedState[*helper.TestState] {
return makeGenesis()
}

// AddVersioned adds a state with the given qcRank and stateRank.
// In addition, the version identifier of the QC embedded within the state
// is specified by `qcVersion`. The version identifier for the state itself
// (primarily for emulating a different state ID) is specified by `stateVersion`.
// [◄(3) 4] denotes a state of rank 4, with a QC for rank 3
// [◄(3) 4'] denotes a state of rank 4 that is different from [◄(3) 4], with a QC for rank 3
// [◄(3) 4'] can be created by AddVersioned(3, 4, 0, 1)
// [◄(3') 4] can be created by AddVersioned(3, 4, 1, 0)
// Returns self-reference for chaining.
func (bb *StateBuilder) AddVersioned(qcRank uint64, stateRank uint64, qcVersion int, stateVersion int) *StateBuilder {
bb.stateRanks = append(bb.stateRanks, &StateRank{
Rank: stateRank,
QCRank: qcRank,
StateVersion: stateVersion,
QCVersion: qcVersion,
})
return bb
}
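
// Illustrative usage (hypothetical, not from the original file): the calls
// below reproduce the notation from the comment above, creating a double
// proposal at rank 4 on a chain rooted at genesis.
//
//	states, err := NewStateBuilder().
//		Add(1, 2).                // [◄(1) 2]
//		Add(2, 3).                // [◄(2) 3]
//		Add(3, 4).                // [◄(3) 4]
//		AddVersioned(3, 4, 0, 1). // [◄(3) 4'] -- double proposal at rank 4
//		States()
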
// Proposals returns a list of all proposals added to the StateBuilder.
// Returns an error if the states do not form a connected tree rooted at genesis.
func (bb *StateBuilder) Proposals() ([]*models.Proposal[*helper.TestState], error) {
states := make([]*models.Proposal[*helper.TestState], 0, len(bb.stateRanks))

genesisState := makeGenesis()
genesisBV := &StateRank{
Rank: genesisState.State.Rank,
QCRank: genesisState.CertifyingQuorumCertificate.GetRank(),
}

qcs := make(map[string]models.QuorumCertificate)
qcs[genesisBV.QCIndex()] = genesisState.CertifyingQuorumCertificate

for _, bv := range bb.stateRanks {
qc, ok := qcs[bv.QCIndex()]
if !ok {
return nil, fmt.Errorf("test failure: no QC found for QC index: %v", bv.QCIndex())
}
var previousRankTimeoutCert models.TimeoutCertificate
if qc.GetRank()+1 != bv.Rank {
previousRankTimeoutCert = helper.MakeTC(helper.WithTCRank(bv.Rank - 1))
}
proposal := &models.Proposal[*helper.TestState]{
State: &models.State[*helper.TestState]{
Rank: bv.Rank,
ParentQuorumCertificate: qc,
},
PreviousRankTimeoutCertificate: previousRankTimeoutCert,
}
proposal.State.Identifier = makeIdentifier(proposal.State, bv.StateVersion)

states = append(states, proposal)

// generate QC for the new proposal
qcs[bv.StateIndex()] = &helper.TestQuorumCertificate{
Rank: proposal.State.Rank,
Selector: proposal.State.Identifier,
AggregatedSignature: nil,
}
}

return states, nil
}

// States returns a list of all states added to the StateBuilder.
// Returns an error if the states do not form a connected tree rooted at genesis.
func (bb *StateBuilder) States() ([]*models.State[*helper.TestState], error) {
proposals, err := bb.Proposals()
if err != nil {
return nil, fmt.Errorf("StateBuilder failed to generate proposals: %w", err)
}
return toStates(proposals), nil
}

// makeIdentifier creates a state identifier based on the state's rank, QC, and state version.
// This is used to identify states uniquely in this specific test setup.
// ATTENTION: this should not be confused with the state ID used in production code, which is a
// collision-resistant hash of the full state content.
func makeIdentifier(state *models.State[*helper.TestState], stateVersion int) models.Identity {
return fmt.Sprintf("%d-%s-%d", state.Rank, state.Identifier, stateVersion)
}

// constructs the genesis state (identical for all calls)
func makeGenesis() *models.CertifiedState[*helper.TestState] {
genesis := &models.State[*helper.TestState]{
Rank: 1,
}
genesis.Identifier = makeIdentifier(genesis, 0)

genesisQC := &helper.TestQuorumCertificate{
Rank: 1,
Selector: genesis.Identifier,
}
certifiedGenesisState, err := models.NewCertifiedState(genesis, genesisQC)
if err != nil {
panic(fmt.Sprintf("combining genesis state and genesis QC to certified state failed: %s", err.Error()))
}
return certifiedGenesisState
}

// toStates converts the given proposals to a slice of states
func toStates(proposals []*models.Proposal[*helper.TestState]) []*models.State[*helper.TestState] {
states := make([]*models.State[*helper.TestState], 0, len(proposals))
for _, b := range proposals {
states = append(states, b.State)
}
return states
}
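
// End-to-end sketch (hypothetical, mirroring the tests in the forks test file):
// build a simple chain with the StateBuilder, feed it to Forks, and the first
// state gets finalized once a direct 2-chain sits on top of it. `finalizer`
// and `notifier` stand in for the mocks used by the tests.
//
//	builder := NewStateBuilder().Add(1, 2).Add(2, 3).Add(3, 4)
//	states, _ := builder.States()
//	forks, _ := NewForks(builder.GenesisState(), finalizer, notifier)
//	for _, s := range states {
//		_ = forks.AddValidatedState(s)
//	}
//	// forks.FinalizedState() now returns states[0], i.e. [◄(1) 2]
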
@ -1,9 +1,8 @@
module source.quilibrium.com/quilibrium/monorepo/consensus

go 1.23.2

toolchain go1.23.4
go 1.24.0

toolchain go1.24.9

replace github.com/multiformats/go-multiaddr => ../go-multiaddr

@ -13,36 +12,27 @@ replace github.com/libp2p/go-libp2p => ../go-libp2p

replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht

require go.uber.org/zap v1.27.0
require (
github.com/gammazero/workerpool v1.1.3
github.com/rs/zerolog v1.34.0
)

require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gammazero/deque v0.2.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
go.uber.org/goleak v1.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

require (
github.com/stretchr/testify v1.10.0
github.com/cloudflare/circl v1.6.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.1 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.26.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/grpc v1.72.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
github.com/pkg/errors v0.9.1
)
github.com/stretchr/testify v1.11.1
go.uber.org/atomic v1.11.0
golang.org/x/sys v0.33.0 // indirect
)

117
consensus/go.sum
@ -1,92 +1,49 @@
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA=
github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU=
github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=

@ -3,6 +3,7 @@ package helper
import (
"bytes"
crand "crypto/rand"
"fmt"
"math/rand"
"time"

@ -100,15 +101,15 @@ func MakeQC(options ...func(*TestQuorumCertificate)) models.QuorumCertificate {
return qc
}

func WithQCState[StateT models.Unique](state *models.State[StateT]) func(TestQuorumCertificate) {
return func(qc TestQuorumCertificate) {
func WithQCState[StateT models.Unique](state *models.State[StateT]) func(*TestQuorumCertificate) {
return func(qc *TestQuorumCertificate) {
qc.Rank = state.Rank
qc.Selector = state.Identifier
}
}

func WithQCSigners(signerIndices []byte) func(TestQuorumCertificate) {
return func(qc TestQuorumCertificate) {
func WithQCSigners(signerIndices []byte) func(*TestQuorumCertificate) {
return func(qc *TestQuorumCertificate) {
qc.AggregatedSignature.(*TestAggregatedSignature).Bitmask = signerIndices
}
}
@ -116,5 +117,6 @@ func WithQCSigners(signerIndices []byte) func(TestQuorumCertificate) {
func WithQCRank(rank uint64) func(*TestQuorumCertificate) {
return func(qc *TestQuorumCertificate) {
qc.Rank = rank
qc.Selector = fmt.Sprintf("%d", rank)
}
}

@ -2,12 +2,202 @@ package helper

import (
crand "crypto/rand"
"fmt"
"math/rand"
"slices"
"time"

"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

type TestWeightedIdentity struct {
ID string
}

// Identity implements models.WeightedIdentity.
func (t *TestWeightedIdentity) Identity() models.Identity {
return t.ID
}

// PublicKey implements models.WeightedIdentity.
func (t *TestWeightedIdentity) PublicKey() []byte {
return make([]byte, 585)
}

// Weight implements models.WeightedIdentity.
func (t *TestWeightedIdentity) Weight() uint64 {
return 1000
}

var _ models.WeightedIdentity = (*TestWeightedIdentity)(nil)

type TestState struct {
Rank uint64
Signature []byte
Timestamp uint64
ID models.Identity
Prover models.Identity
}

// Clone implements models.Unique.
func (t *TestState) Clone() models.Unique {
return &TestState{
Rank: t.Rank,
Signature: slices.Clone(t.Signature),
Timestamp: t.Timestamp,
ID: t.ID,
Prover: t.Prover,
}
}

// GetRank implements models.Unique.
func (t *TestState) GetRank() uint64 {
return t.Rank
}

// GetSignature implements models.Unique.
func (t *TestState) GetSignature() []byte {
return t.Signature
}

// GetTimestamp implements models.Unique.
func (t *TestState) GetTimestamp() uint64 {
return t.Timestamp
}

// Identity implements models.Unique.
func (t *TestState) Identity() models.Identity {
return t.ID
}

// Source implements models.Unique.
func (t *TestState) Source() models.Identity {
return t.Prover
}

type TestVote struct {
Rank uint64
Signature []byte
Timestamp uint64
ID models.Identity
StateID models.Identity
}

// Clone implements models.Unique.
func (t *TestVote) Clone() models.Unique {
return &TestVote{
Rank: t.Rank,
Signature: slices.Clone(t.Signature),
Timestamp: t.Timestamp,
ID: t.ID,
StateID: t.StateID,
}
}

// GetRank implements models.Unique.
func (t *TestVote) GetRank() uint64 {
return t.Rank
}

// GetSignature implements models.Unique.
func (t *TestVote) GetSignature() []byte {
return t.Signature
}

// GetTimestamp implements models.Unique.
func (t *TestVote) GetTimestamp() uint64 {
return t.Timestamp
}

// Identity implements models.Unique.
func (t *TestVote) Identity() models.Identity {
return t.ID
}

// Source implements models.Unique.
func (t *TestVote) Source() models.Identity {
return t.StateID
}

type TestPeer struct {
PeerID string
}

// Clone implements models.Unique.
func (t *TestPeer) Clone() models.Unique {
return &TestPeer{
PeerID: t.PeerID,
}
}

// GetRank implements models.Unique.
func (t *TestPeer) GetRank() uint64 {
return 0
}

// GetSignature implements models.Unique.
func (t *TestPeer) GetSignature() []byte {
return []byte{}
}

// GetTimestamp implements models.Unique.
func (t *TestPeer) GetTimestamp() uint64 {
return 0
}

// Identity implements models.Unique.
func (t *TestPeer) Identity() models.Identity {
return t.PeerID
}

// Source implements models.Unique.
func (t *TestPeer) Source() models.Identity {
return t.PeerID
}

type TestCollected struct {
Rank uint64
TXs [][]byte
}

// Clone implements models.Unique.
func (t *TestCollected) Clone() models.Unique {
return &TestCollected{
Rank: t.Rank,
TXs: slices.Clone(t.TXs),
}
}

// GetRank implements models.Unique.
func (t *TestCollected) GetRank() uint64 {
return t.Rank
}

// GetSignature implements models.Unique.
func (t *TestCollected) GetSignature() []byte {
return []byte{}
}

// GetTimestamp implements models.Unique.
func (t *TestCollected) GetTimestamp() uint64 {
return 0
}

// Identity implements models.Unique.
func (t *TestCollected) Identity() models.Identity {
return fmt.Sprintf("%d", t.Rank)
}

// Source implements models.Unique.
func (t *TestCollected) Source() models.Identity {
return ""
}

var _ models.Unique = (*TestState)(nil)
var _ models.Unique = (*TestVote)(nil)
var _ models.Unique = (*TestPeer)(nil)
var _ models.Unique = (*TestCollected)(nil)

func MakeIdentity() models.Identity {
s := make([]byte, 32)
crand.Read(s)
@ -110,3 +300,43 @@ func WithPreviousRankTimeoutCertificate[StateT models.Unique](previousRankTimeou
proposal.PreviousRankTimeoutCertificate = previousRankTimeoutCert
}
}

func WithWeightedIdentityList(count int) []models.WeightedIdentity {
wi := []models.WeightedIdentity{}
for i := range count {
wi = append(wi, &TestWeightedIdentity{
ID: fmt.Sprintf("%d", i),
})
}
return wi
}

func VoteForStateFixture[StateT models.Unique, VoteT models.Unique](state *models.State[StateT], ops ...func(vote *VoteT)) VoteT {
v := new(VoteT)
for _, op := range ops {
op(v)
}
return *v
}

func VoteFixture[VoteT models.Unique](op func(vote *VoteT)) VoteT {
v := new(VoteT)
op(v)
return *v
}

type FmtLog struct{}

// Error implements consensus.TraceLogger.
func (n *FmtLog) Error(message string, err error) {
fmt.Printf("ERROR: %s: %v\n", message, err)
}

// Trace implements consensus.TraceLogger.
func (n *FmtLog) Trace(message string) {
fmt.Printf("TRACE: %s\n", message)
}

func Logger() *FmtLog {
return &FmtLog{}
}

@ -125,9 +125,21 @@ func TimeoutStateFixture[VoteT models.Unique](
opt(timeout)
}

if timeout.Vote == nil {
panic("WithTimeoutVote must be called")
}

return timeout
}

func WithTimeoutVote[VoteT models.Unique](
vote VoteT,
) func(*models.TimeoutState[VoteT]) {
return func(state *models.TimeoutState[VoteT]) {
state.Vote = &vote
}
}

func WithTimeoutNewestQC[VoteT models.Unique](
newestQC models.QuorumCertificate,
) func(*models.TimeoutState[VoteT]) {
@ -149,5 +161,11 @@ func WithTimeoutStateRank[VoteT models.Unique](
) func(*models.TimeoutState[VoteT]) {
return func(timeout *models.TimeoutState[VoteT]) {
timeout.Rank = rank
if timeout.LatestQuorumCertificate != nil {
timeout.LatestQuorumCertificate.(*TestQuorumCertificate).Rank = rank
}
if timeout.PriorRankTimeoutCertificate != nil {
timeout.PriorRankTimeoutCertificate.(*TestTimeoutCertificate).Rank = rank - 1
}
}
}

@ -103,23 +103,23 @@ func (_m *DynamicCommittee) IdentityByRank(rank uint64, participantID models.Ide
}

// IdentityByState provides a mock function with given fields: stateID, participantID
func (_m *DynamicCommittee) IdentityByState(stateID models.Identity, participantID models.Identity) (*models.WeightedIdentity, error) {
func (_m *DynamicCommittee) IdentityByState(stateID models.Identity, participantID models.Identity) (models.WeightedIdentity, error) {
ret := _m.Called(stateID, participantID)

if len(ret) == 0 {
panic("no return value specified for IdentityByState")
}

var r0 *models.WeightedIdentity
var r0 models.WeightedIdentity
var r1 error
if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) (*models.WeightedIdentity, error)); ok {
if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) (models.WeightedIdentity, error)); ok {
return rf(stateID, participantID)
}
if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) *models.WeightedIdentity); ok {
if rf, ok := ret.Get(0).(func(models.Identity, models.Identity) models.WeightedIdentity); ok {
r0 = rf(stateID, participantID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.WeightedIdentity)
r0 = ret.Get(0).(models.WeightedIdentity)
}
}

@ -12,9 +12,9 @@ type Signer[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}

// CreateTimeout provides a mock function with given fields: curView, newestQC, previousRankTimeoutCert
func (_m *Signer[StateT, VoteT]) CreateTimeout(curView uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) {
ret := _m.Called(curView, newestQC, previousRankTimeoutCert)
// CreateTimeout provides a mock function with given fields: curRank, newestQC, previousRankTimeoutCert
func (_m *Signer[StateT, VoteT]) CreateTimeout(curRank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) (*models.TimeoutState[VoteT], error) {
ret := _m.Called(curRank, newestQC, previousRankTimeoutCert)

if len(ret) == 0 {
panic("no return value specified for CreateTimeout")
@ -23,10 +23,10 @@ func (_m *Signer[StateT, VoteT]) CreateTimeout(curView uint64, newestQC models.Q
var r0 *models.TimeoutState[VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) (*models.TimeoutState[VoteT], error)); ok {
return rf(curView, newestQC, previousRankTimeoutCert)
return rf(curRank, newestQC, previousRankTimeoutCert)
}
if rf, ok := ret.Get(0).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) *models.TimeoutState[VoteT]); ok {
r0 = rf(curView, newestQC, previousRankTimeoutCert)
r0 = rf(curRank, newestQC, previousRankTimeoutCert)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.TimeoutState[VoteT])
@ -34,7 +34,7 @@ func (_m *Signer[StateT, VoteT]) CreateTimeout(curView uint64, newestQC models.Q
}

if rf, ok := ret.Get(1).(func(uint64, models.QuorumCertificate, models.TimeoutCertificate) error); ok {
r1 = rf(curView, newestQC, previousRankTimeoutCert)
r1 = rf(curRank, newestQC, previousRankTimeoutCert)
} else {
r1 = ret.Error(1)
}

@ -33,19 +33,19 @@ func (_m *VerifyingVoteProcessor[StateT, VoteT]) Process(vote *VoteT) error {
}

// State provides a mock function with no fields
func (_m *VerifyingVoteProcessor[StateT, VoteT]) State() *StateT {
func (_m *VerifyingVoteProcessor[StateT, VoteT]) State() *models.State[StateT] {
ret := _m.Called()

if len(ret) == 0 {
panic("no return value specified for State")
}

var r0 *StateT
if rf, ok := ret.Get(0).(func() *StateT); ok {
var r0 *models.State[StateT]
if rf, ok := ret.Get(0).(func() *models.State[StateT]); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*StateT)
r0 = ret.Get(0).(*models.State[StateT])
}
}

@ -50,6 +50,24 @@ func (_m *VoteCollector[StateT, VoteT]) ProcessState(state *models.SignedProposa
return r0
}

// Rank provides a mock function with no fields
func (_m *VoteCollector[StateT, VoteT]) Rank() uint64 {
ret := _m.Called()

if len(ret) == 0 {
panic("no return value specified for Rank")
}

var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}

return r0
}

// RegisterVoteConsumer provides a mock function with given fields: consumer
func (_m *VoteCollector[StateT, VoteT]) RegisterVoteConsumer(consumer consensus.VoteConsumer[VoteT]) {
_m.Called(consumer)
@ -73,24 +91,6 @@ func (_m *VoteCollector[StateT, VoteT]) Status() consensus.VoteCollectorStatus {
return r0
}

// View provides a mock function with no fields
func (_m *VoteCollector[StateT, VoteT]) View() uint64 {
ret := _m.Called()

if len(ret) == 0 {
panic("no return value specified for View")
}

var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint64)
}

return r0
}

// NewVoteCollector creates a new instance of VoteCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewVoteCollector[StateT models.Unique, VoteT models.Unique](t interface {

@ -14,9 +14,9 @@ type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique] struct {
mock.Mock
}

// Create provides a mock function with given fields: tracer, proposal
func (_m *VoteProcessorFactory[StateT, VoteT]) Create(tracer consensus.TraceLogger, proposal *models.SignedProposal[StateT, VoteT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) {
ret := _m.Called(tracer, proposal)
// Create provides a mock function with given fields: tracer, proposal, dsTag, aggregator
func (_m *VoteProcessorFactory[StateT, VoteT]) Create(tracer consensus.TraceLogger, proposal *models.SignedProposal[StateT, VoteT], dsTag []byte, aggregator consensus.SignatureAggregator) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) {
ret := _m.Called(tracer, proposal, dsTag, aggregator)

if len(ret) == 0 {
panic("no return value specified for Create")
@ -24,19 +24,19 @@ func (_m *VoteProcessorFactory[StateT, VoteT]) Create(tracer consensus.TraceLogg

var r0 consensus.VerifyingVoteProcessor[StateT, VoteT]
var r1 error
if rf, ok := ret.Get(0).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT]) (consensus.VerifyingVoteProcessor[StateT, VoteT], error)); ok {
return rf(tracer, proposal)
if rf, ok := ret.Get(0).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator) (consensus.VerifyingVoteProcessor[StateT, VoteT], error)); ok {
return rf(tracer, proposal, dsTag, aggregator)
}
if rf, ok := ret.Get(0).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT]) consensus.VerifyingVoteProcessor[StateT, VoteT]); ok {
r0 = rf(tracer, proposal)
if rf, ok := ret.Get(0).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator) consensus.VerifyingVoteProcessor[StateT, VoteT]); ok {
r0 = rf(tracer, proposal, dsTag, aggregator)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(consensus.VerifyingVoteProcessor[StateT, VoteT])
}
}

if rf, ok := ret.Get(1).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT]) error); ok {
r1 = rf(tracer, proposal)
if rf, ok := ret.Get(1).(func(consensus.TraceLogger, *models.SignedProposal[StateT, VoteT], []byte, consensus.SignatureAggregator) error); ok {
r1 = rf(tracer, proposal, dsTag, aggregator)
} else {
r1 = ret.Error(1)
}

@ -14,6 +14,36 @@ type VotingProvider[StateT models.Unique, VoteT models.Unique, PeerIDT models.Un
mock.Mock
}

// FinalizeQuorumCertificate provides a mock function with given fields: ctx, state, aggregatedSignature
func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeQuorumCertificate(ctx context.Context, state *models.State[StateT], aggregatedSignature models.AggregatedSignature) (models.QuorumCertificate, error) {
ret := _m.Called(ctx, state, aggregatedSignature)

if len(ret) == 0 {
panic("no return value specified for FinalizeQuorumCertificate")
}

var r0 models.QuorumCertificate
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT], models.AggregatedSignature) (models.QuorumCertificate, error)); ok {
return rf(ctx, state, aggregatedSignature)
}
if rf, ok := ret.Get(0).(func(context.Context, *models.State[StateT], models.AggregatedSignature) models.QuorumCertificate); ok {
r0 = rf(ctx, state, aggregatedSignature)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(models.QuorumCertificate)
}
}

if rf, ok := ret.Get(1).(func(context.Context, *models.State[StateT], models.AggregatedSignature) error); ok {
r1 = rf(ctx, state, aggregatedSignature)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// FinalizeTimeout provides a mock function with given fields: ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature
func (_m *VotingProvider[StateT, VoteT, PeerIDT]) FinalizeTimeout(ctx context.Context, filter []byte, rank uint64, latestQuorumCertificateRanks []uint64, aggregatedSignature models.AggregatedSignature) (models.TimeoutCertificate, error) {
ret := _m.Called(ctx, filter, rank, latestQuorumCertificateRanks, aggregatedSignature)

@ -65,9 +65,9 @@ type CertifiedState[StateT Unique] struct {
func NewCertifiedState[StateT Unique](
state *State[StateT],
quorumCertificate QuorumCertificate,
) (CertifiedState[StateT], error) {
) (*CertifiedState[StateT], error) {
if state.Rank != quorumCertificate.GetRank() {
return CertifiedState[StateT]{},
return &CertifiedState[StateT]{},
fmt.Errorf(
"state's rank (%d) should equal the qc's rank (%d)",
state.Rank,
@ -75,14 +75,14 @@ func NewCertifiedState[StateT Unique](
)
}
if state.Identifier != quorumCertificate.GetSelector() {
return CertifiedState[StateT]{},
return &CertifiedState[StateT]{},
fmt.Errorf(
"state's ID (%x) should equal the state referenced by the qc (%x)",
state.Identifier,
quorumCertificate.GetSelector(),
)
}
return CertifiedState[StateT]{
return &CertifiedState[StateT]{
State: state,
CertifyingQuorumCertificate: quorumCertificate,
}, nil
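A minimal usage sketch (not part of the commit) of the now pointer-returning constructor, built with the helper fixtures that appear later in this diff; the ranks are illustrative:

state := helper.MakeState(helper.WithStateRank[*helper.TestState](42))
qc := helper.MakeQC(helper.WithQCRank(41)) // deliberately mismatched rank
if _, err := models.NewCertifiedState(state, qc); err != nil {
// err: "state's rank (42) should equal the qc's rank (41)"
}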
@ -1,6 +1,8 @@
package models

import "bytes"
import (
"bytes"
)

// TimeoutState represents the stored state change step relevant to the point of
// rank of a given instance of the consensus state machine.
@ -36,10 +38,21 @@ func (t *TimeoutState[VoteT]) Equals(other *TimeoutState[VoteT]) bool {
return false
}

if t.Vote != other.Vote && (other.Vote == nil || t.Vote == nil) {
return false
}

// both are not nil, so we can compare the fields
return t.Rank == other.Rank &&
t.LatestQuorumCertificate.Equals(other.LatestQuorumCertificate) &&
t.PriorRankTimeoutCertificate.Equals(other.PriorRankTimeoutCertificate) &&
(*t.Vote).Source() == (*other.Vote).Source() &&
bytes.Equal((*t.Vote).GetSignature(), (*other.Vote).GetSignature())
((t.LatestQuorumCertificate == nil &&
other.LatestQuorumCertificate == nil) ||
t.LatestQuorumCertificate.Equals(other.LatestQuorumCertificate)) &&
((t.PriorRankTimeoutCertificate == nil &&
other.PriorRankTimeoutCertificate == nil) ||
t.PriorRankTimeoutCertificate.Equals(
other.PriorRankTimeoutCertificate,
)) &&
((t.Vote == other.Vote) ||
((*t.Vote).Source() == (*other.Vote).Source()) &&
bytes.Equal((*t.Vote).GetSignature(), (*other.Vote).GetSignature()))
}
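A quick sketch (not part of the commit) of what the nil guards above buy: two timeout states whose optional certificates and votes are all nil now compare equal instead of hitting a nil dereference in the certificate comparison:

a := &models.TimeoutState[*helper.TestVote]{Rank: 7}
b := &models.TimeoutState[*helper.TestVote]{Rank: 7}
equal := a.Equals(b) // true: nil QC, TC and Vote on both sides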
@ -10,7 +10,7 @@ import (

// CommunicatorDistributor ingests outbound consensus messages from HotStuff's
// core logic and distributes them to consumers. This logic only runs inside
// active consensus participants proposing state, voting, collecting +
// active consensus participants proposing states, voting, collecting +
// aggregating votes to QCs, and participating in the pacemaker (sending
// timeouts, collecting + aggregating timeouts to TCs).
// Concurrency safe.
@ -52,108 +52,108 @@ func (

func (
d *ParticipantDistributor[StateT, VoteT],
) OnStart(currentView uint64) {
) OnStart(currentRank uint64) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnStart(currentView)
subscriber.OnStart(currentRank)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnReceiveProposal(
currentView uint64,
currentRank uint64,
proposal *models.SignedProposal[StateT, VoteT],
) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnReceiveProposal(currentView, proposal)
subscriber.OnReceiveProposal(currentRank, proposal)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnReceiveQuorumCertificate(currentView uint64, qc models.QuorumCertificate) {
) OnReceiveQuorumCertificate(currentRank uint64, qc models.QuorumCertificate) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnReceiveQuorumCertificate(currentView, qc)
subscriber.OnReceiveQuorumCertificate(currentRank, qc)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnReceiveTimeoutCertificate(
currentView uint64,
currentRank uint64,
tc models.TimeoutCertificate,
) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnReceiveTimeoutCertificate(currentView, tc)
subscriber.OnReceiveTimeoutCertificate(currentRank, tc)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnPartialTimeoutCertificate(
currentView uint64,
partialTc *consensus.PartialTimeoutCertificateCreated,
currentRank uint64,
partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated,
) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnPartialTimeoutCertificate(currentView, partialTc)
subscriber.OnPartialTimeoutCertificate(currentRank, partialTimeoutCertificate)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnLocalTimeout(currentView uint64) {
) OnLocalTimeout(currentRank uint64) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnLocalTimeout(currentView)
subscriber.OnLocalTimeout(currentRank)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnRankChange(oldView, newView uint64) {
) OnRankChange(oldRank, newRank uint64) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnRankChange(oldView, newView)
subscriber.OnRankChange(oldRank, newRank)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnQuorumCertificateTriggeredRankChange(
oldView uint64,
newView uint64,
oldRank uint64,
newRank uint64,
qc models.QuorumCertificate,
) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnQuorumCertificateTriggeredRankChange(oldView, newView, qc)
subscriber.OnQuorumCertificateTriggeredRankChange(oldRank, newRank, qc)
}
}

func (
d *ParticipantDistributor[StateT, VoteT],
) OnTimeoutCertificateTriggeredRankChange(
oldView uint64,
newView uint64,
oldRank uint64,
newRank uint64,
tc models.TimeoutCertificate,
) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnTimeoutCertificateTriggeredRankChange(oldView, newView, tc)
subscriber.OnTimeoutCertificateTriggeredRankChange(oldRank, newRank, tc)
}
}

@ -170,12 +170,12 @@ func (
func (
d *ParticipantDistributor[StateT, VoteT],
) OnCurrentRankDetails(
currentView, finalizedView uint64,
currentRank, finalizedRank uint64,
currentLeader models.Identity,
) {
d.lock.RLock()
defer d.lock.RUnlock()
for _, subscriber := range d.consumers {
subscriber.OnCurrentRankDetails(currentView, finalizedView, currentLeader)
subscriber.OnCurrentRankDetails(currentRank, finalizedRank, currentLeader)
}
}
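A minimal sketch (hypothetical names, not part of this commit) of the fan-out pattern used by ParticipantDistributor above: subscription takes the write lock, while notifications only take the read lock, so concurrent notifications never block each other:

type rankConsumer interface {
OnRankChange(oldRank, newRank uint64)
}

type distributor struct {
lock      sync.RWMutex
consumers []rankConsumer
}

func (d *distributor) AddConsumer(c rankConsumer) {
d.lock.Lock() // writers are exclusive
defer d.lock.Unlock()
d.consumers = append(d.consumers, c)
}

func (d *distributor) OnRankChange(oldRank, newRank uint64) {
d.lock.RLock() // many notifiers may iterate concurrently
defer d.lock.RUnlock()
for _, c := range d.consumers {
c.OnRankChange(oldRank, newRank)
}
}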
@ -48,6 +48,7 @@ func NewPacemaker[
store: store,
traceLogger: traceLogger,
livenessState: livenessState,
backoffTimer: consensus.NewBackoffTimer(),
started: false,
}, nil
}
@ -207,7 +207,7 @@ func (r *SafetyRules[StateT, VoteT]) produceVote(
}
}

vote, err := r.signer.CreateVote(state.State)
vote, err := r.signer.CreateVote(state)
if err != nil {
return nil, fmt.Errorf("could not vote for state: %w", err)
}
@ -223,7 +223,7 @@ func (r *SafetyRules[StateT, VoteT]) produceVote(
return nil, fmt.Errorf("could not persist safety data: %w", err)
}

return &vote, nil
return vote, nil
}

// ProduceTimeout takes current rank, highest locally known QC and TC (optional,
@ -252,6 +252,7 @@ func (r *SafetyRules[StateT, VoteT]) ProduceTimeout(
LatestQuorumCertificate: lastTimeout.LatestQuorumCertificate,
PriorRankTimeoutCertificate: lastTimeout.PriorRankTimeoutCertificate,
TimeoutTick: lastTimeout.TimeoutTick + 1,
Vote: lastTimeout.Vote,
}

// persist updated TimeoutState in `safetyData` and return it
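A sketch (not part of the commit) of the retry behavior that the added TimeoutTick field enables; `safety`, `rank` and `newestQC` are illustrative names, and the caching itself is exercised by TestProduceTimeout_ShouldTimeout later in this diff:

first, _ := safety.ProduceTimeout(rank, newestQC, nil)
again, _ := safety.ProduceTimeout(rank, newestQC, nil)
// `again` reuses the cached TimeoutState: equal fields, TimeoutTick bumped by one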
834
consensus/safetyrules/safety_rules_test.go
Normal file
@ -0,0 +1,834 @@
package safetyrules

import (
"errors"
"testing"

"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"source.quilibrium.com/quilibrium/monorepo/consensus/helper"
"source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestSafetyRules(t *testing.T) {
suite.Run(t, new(SafetyRulesTestSuite))
}

// SafetyRulesTestSuite is a test suite for testing SafetyRules related functionality.
// SafetyRulesTestSuite sets up mocks for the injected modules and creates a models.ConsensusState[*helper.TestVote]
// based on the following configuration:
// R <- B[QC_R] <- P[QC_B]
// B.Rank = R.Rank + 1
// B - bootstrapped state, we are creating SafetyRules at state B
// Based on this, LatestAcknowledgedRank = B.Rank.
type SafetyRulesTestSuite struct {
suite.Suite

bootstrapState *models.State[*helper.TestState]
proposal *models.SignedProposal[*helper.TestState, *helper.TestVote]
proposerIdentity models.Identity
ourIdentity models.Identity
signer *mocks.Signer[*helper.TestState, *helper.TestVote]
persister *mocks.ConsensusStore[*helper.TestVote]
committee *mocks.DynamicCommittee
safetyData *models.ConsensusState[*helper.TestVote]
safety *SafetyRules[*helper.TestState, *helper.TestVote]
}

func (s *SafetyRulesTestSuite) SetupTest() {
s.ourIdentity = helper.MakeIdentity()
s.signer = &mocks.Signer[*helper.TestState, *helper.TestVote]{}
s.persister = &mocks.ConsensusStore[*helper.TestVote]{}
s.committee = &mocks.DynamicCommittee{}
s.proposerIdentity = helper.MakeIdentity()

// bootstrap at random bootstrapState
s.bootstrapState = helper.MakeState(helper.WithStateRank[*helper.TestState](100))
s.proposal = helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState[*helper.TestState](
helper.MakeState[*helper.TestState](
helper.WithParentState[*helper.TestState](s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+1),
helper.WithStateProposer[*helper.TestState](s.proposerIdentity)),
))))

s.committee.On("Self").Return(s.ourIdentity).Maybe()
s.committee.On("LeaderForRank", mock.Anything).Return(s.proposerIdentity, nil).Maybe()
s.committee.On("IdentityByState", mock.Anything, s.ourIdentity).Return(&helper.TestWeightedIdentity{ID: s.ourIdentity}, nil).Maybe()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()
s.committee.On("IdentityByRank", mock.Anything, s.ourIdentity).Return(&helper.TestWeightedIdentity{ID: s.ourIdentity}, nil).Maybe()

s.safetyData = &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.bootstrapState.Rank,
LatestAcknowledgedRank: s.bootstrapState.Rank,
}
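// NewSafetyRules reads the persisted consensus state exactly once at
// construction, hence the .Once() expectation below.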
s.persister.On("GetConsensusState").Return(s.safetyData, nil).Once()
var err error
s.safety, err = NewSafetyRules(s.signer, s.persister, s.committee)
require.NoError(s.T(), err)
}

// TestProduceVote_ShouldVote tests the basic happy path scenario where we vote for the first state after bootstrap
// and the next rank ended with a TC
func (s *SafetyRulesTestSuite) TestProduceVote_ShouldVote() {
expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(),
LatestAcknowledgedRank: s.proposal.State.Rank,
}

expectedVote := makeVote(s.proposal.State)
s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.NoError(s.T(), err)
require.NotNil(s.T(), vote)
require.Equal(s.T(), &expectedVote, vote)

s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData)

// producing a vote for the same rank yields an error since we have already voted for this rank
otherVote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.True(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), otherVote)

previousRankTimeoutCert := helper.MakeTC(
helper.WithTCRank(s.proposal.State.Rank+1),
helper.WithTCNewestQC(s.proposal.State.ParentQuorumCertificate))

// voting on a proposal where the last rank ended with a TC
proposalWithTC := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal[*helper.TestState](
helper.WithState[*helper.TestState](
helper.MakeState[*helper.TestState](
helper.WithParentState[*helper.TestState](s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.proposal.State.Rank+2),
helper.WithStateProposer[*helper.TestState](s.proposerIdentity))),
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](previousRankTimeoutCert))))

expectedSafetyData = &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(),
LatestAcknowledgedRank: proposalWithTC.State.Rank,
}

expectedVote = makeVote(proposalWithTC.State)
s.signer.On("CreateVote", proposalWithTC.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()
s.committee.On("IdentityByState", proposalWithTC.State.Identifier, proposalWithTC.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()

vote, err = s.safety.ProduceVote(proposalWithTC, proposalWithTC.State.Rank)
require.NoError(s.T(), err)
require.NotNil(s.T(), vote)
require.Equal(s.T(), &expectedVote, vote)
s.signer.AssertExpectations(s.T())
s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData)
}
// TestProduceVote_IncludedQCHigherThanTCsQC checks the specific scenario where the previous round resulted in a TC and the leader
// knows about a QC which is not part of the TC, with qc.Rank > tc.NewestQC.Rank. We want to allow this; in this case the leader
// includes their QC in the proposal, satisfying the condition: State.ParentQuorumCertificate.GetRank() > previousRankTimeoutCert.NewestQC.Rank
func (s *SafetyRulesTestSuite) TestProduceVote_IncludedQCHigherThanTCsQC() {
previousRankTimeoutCert := helper.MakeTC(
helper.WithTCRank(s.proposal.State.Rank+1),
helper.WithTCNewestQC(s.proposal.State.ParentQuorumCertificate))

// voting on a proposal where the last rank ended with a TC
proposalWithTC := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal[*helper.TestState](
helper.WithState[*helper.TestState](
helper.MakeState[*helper.TestState](
helper.WithParentState[*helper.TestState](s.proposal.State),
helper.WithStateRank[*helper.TestState](s.proposal.State.Rank+2),
helper.WithStateProposer[*helper.TestState](s.proposerIdentity))),
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](previousRankTimeoutCert))))

expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: proposalWithTC.State.ParentQuorumCertificate.GetRank(),
LatestAcknowledgedRank: proposalWithTC.State.Rank,
}

require.Greater(s.T(), proposalWithTC.State.ParentQuorumCertificate.GetRank(), proposalWithTC.PreviousRankTimeoutCertificate.GetLatestQuorumCert().GetRank(),
"for this test case we specifically require that qc.Rank > previousRankTimeoutCert.NewestQC.Rank")

expectedVote := makeVote(proposalWithTC.State)
s.signer.On("CreateVote", proposalWithTC.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()
s.committee.On("IdentityByState", proposalWithTC.State.Identifier, proposalWithTC.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()

vote, err := s.safety.ProduceVote(proposalWithTC, proposalWithTC.State.Rank)
require.NoError(s.T(), err)
require.NotNil(s.T(), vote)
require.Equal(s.T(), &expectedVote, vote)
s.signer.AssertExpectations(s.T())
s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData)
}
// TestProduceVote_UpdateFinalizedRank tests that FinalizedRank is updated when the replica sees a higher QC.
// Note: `FinalizedRank` is only updated when the replica votes.
func (s *SafetyRulesTestSuite) TestProduceVote_UpdateFinalizedRank() {
s.safety.consensusState.FinalizedRank = 0

require.NotEqual(s.T(), s.safety.consensusState.FinalizedRank, s.proposal.State.ParentQuorumCertificate.GetRank(),
"in this test FinalizedRank is lower so it needs to be updated")

expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(),
LatestAcknowledgedRank: s.proposal.State.Rank,
}

expectedVote := makeVote(s.proposal.State)
s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.NoError(s.T(), err)
require.NotNil(s.T(), vote)
require.Equal(s.T(), &expectedVote, vote)
s.signer.AssertExpectations(s.T())
s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData)
}
// TestProduceVote_InvalidCurrentRank tests that no vote is created if `curRank` has invalid values.
// In particular, `SafetyRules` requires that:
// - the state's rank matches `curRank`
// - the values for `curRank` are monotonically increasing
//
// Failing any of these conditions is a symptom of an internal bug; hence `SafetyRules` should
// _not_ return a `NoVoteError`.
func (s *SafetyRulesTestSuite) TestProduceVote_InvalidCurrentRank() {

s.Run("state-rank-does-not-match", func() {
vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank+1)
require.Nil(s.T(), vote)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
})
s.Run("rank-not-monotonicly-increasing", func() {
// create a state with rank < LatestAcknowledgedRank
proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
func(state *models.State[*helper.TestState]) {
state.ParentQuorumCertificate = helper.MakeQC(helper.WithQCRank(s.safetyData.LatestAcknowledgedRank - 2))
},
helper.WithStateRank[*helper.TestState](s.safetyData.LatestAcknowledgedRank-1))))))
vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank)
require.Nil(s.T(), vote)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
})

s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_CommitteeLeaderException verifies that SafetyRules handles unexpected error returns from
// the DynamicCommittee correctly. Specifically, generic exceptions and `models.ErrRankUnknown`
// returned by the committee when requesting the leader for the state's rank are propagated up the call stack.
// SafetyRules should *not* wrap unexpected exceptions into an expected NoVoteError.
func (s *SafetyRulesTestSuite) TestProduceVote_CommitteeLeaderException() {
*s.committee = mocks.DynamicCommittee{}
for _, exception := range []error{
errors.New("invalid-leader-identity"),
models.ErrRankUnknown,
} {
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return("", exception).Once()
vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), vote)
require.ErrorIs(s.T(), err, exception)
require.False(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
}
// TestProduceVote_DifferentProposerFromLeader tests that no vote is created if the proposer is different from the leader for the
// current rank. This is byzantine behavior and should be handled by the compliance layer, but nevertheless we want to
// have a sanity check for other code paths like voting on an own proposal created by the current leader.
func (s *SafetyRulesTestSuite) TestProduceVote_DifferentProposerFromLeader() {
s.proposal.State.ProposerID = helper.MakeIdentity()
vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_ProposerEjected tests that no vote is created if the state proposer is ejected
func (s *SafetyRulesTestSuite) TestProduceVote_ProposerEjected() {
*s.committee = mocks.DynamicCommittee{}
s.committee.On("Self").Return(s.ourIdentity).Maybe()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(nil, models.NewInvalidSignerErrorf("node-ejected")).Once()
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), vote)
require.True(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_InvalidProposerIdentity tests that no vote is created if there was an exception retrieving the proposer identity.
// We are specifically testing that unexpected errors are handled correctly, i.e.
// that SafetyRules does not erroneously wrap unexpected exceptions into the expected NoVoteError.
func (s *SafetyRulesTestSuite) TestProduceVote_InvalidProposerIdentity() {
*s.committee = mocks.DynamicCommittee{}
exception := errors.New("invalid-signer-identity")
s.committee.On("Self").Return(s.ourIdentity).Maybe()
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(nil, exception).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), vote)
require.ErrorIs(s.T(), err, exception)
require.False(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_NodeEjected tests that no vote is created if the voter is not authorized to vote.
// Nodes have zero weight in the grace periods around the epochs where they are authorized to participate.
// We don't want zero-weight nodes to vote in the first place, to avoid unnecessary traffic.
// Note: this also covers ejected nodes. In both cases, the committee will return an `InvalidSignerError`.
func (s *SafetyRulesTestSuite) TestProduceVote_NodeEjected() {
*s.committee = mocks.DynamicCommittee{}
s.committee.On("Self").Return(s.ourIdentity)
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.ourIdentity).Return(nil, models.NewInvalidSignerErrorf("node-ejected")).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), vote)
require.True(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_InvalidVoterIdentity tests that no vote is created if there was an exception retrieving the voter identity.
// We are specifically testing that unexpected errors are handled correctly, i.e.
// that SafetyRules does not erroneously wrap unexpected exceptions into the expected NoVoteError.
func (s *SafetyRulesTestSuite) TestProduceVote_InvalidVoterIdentity() {
*s.committee = mocks.DynamicCommittee{}
s.committee.On("Self").Return(s.ourIdentity)
exception := errors.New("invalid-signer-identity")
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.proposerIdentity, nil).Once()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()
s.committee.On("IdentityByState", s.proposal.State.Identifier, s.ourIdentity).Return(nil, exception).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), vote)
require.ErrorIs(s.T(), err, exception)
require.False(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_CreateVoteException tests that no vote is created if vote creation raised an exception
func (s *SafetyRulesTestSuite) TestProduceVote_CreateVoteException() {
exception := errors.New("create-vote-exception")
s.signer.On("CreateVote", s.proposal.State).Return(nil, exception).Once()
vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), vote)
require.ErrorIs(s.T(), err, exception)
require.False(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_PersistStateException tests that no vote is created if persisting state failed
func (s *SafetyRulesTestSuite) TestProduceVote_PersistStateException() {
exception := errors.New("persister-exception")
s.persister.On("PutConsensusState", mock.Anything).Return(exception)

vote := makeVote(s.proposal.State)
s.signer.On("CreateVote", s.proposal.State).Return(&vote, nil).Once()
votePtr, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Nil(s.T(), votePtr)
require.ErrorIs(s.T(), err, exception)
}
// TestProduceVote_VotingOnInvalidProposals tests different scenarios where we try to vote on unsafe states.
// SafetyRules contains a variety of checks to confirm that QC and TC have the desired relationship to each other.
// In particular, we test:
//
// (i) A TC should be included in a proposal if and only if the QC is not for the prior rank.
// (ii) When the proposal includes a TC (i.e. the QC not being for the prior rank), the TC must be for the prior rank.
// (iii) The QC in the state must have a smaller rank than the state.
// (iv) If the state contains a TC, the TC cannot contain a newer QC than the state itself.
//
// Conditions (i) - (iv) are validity requirements for the state, and all states that SafetyRules processes
// are supposed to be pre-validated. Hence, failing any of those conditions means we have an internal bug.
// Consequently, we expect SafetyRules to return exceptions but _not_ `NoVoteError`, because the latter
// indicates that the input state was valid, but we didn't want to vote.
func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() {

// a proposal which includes a QC for the previous round should not contain a TC
s.Run("proposal-includes-last-rank-qc-and-tc", func() {
proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
helper.WithParentState(s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+1))),
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](helper.MakeTC()))))
s.committee.On("IdentityByState", proposal.State.Identifier, proposal.State.ProposerID).Return(&helper.TestWeightedIdentity{ID: s.proposerIdentity}, nil).Maybe()
vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
})
s.Run("no-last-rank-tc", func() {
// create a state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and PreviousRankTimeoutCertificate = nil
proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
helper.WithParentState(s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2))))))
vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
})
s.Run("last-rank-tc-invalid-rank", func() {
// create a state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and
// State.Rank != PreviousRankTimeoutCertificate.Rank+1
proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
helper.WithParentState(s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2))),
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](
helper.MakeTC(
helper.WithTCRank(s.bootstrapState.Rank))))))
vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
})
s.Run("proposal-includes-QC-for-higher-rank", func() {
// create a state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and
// State.Rank == PreviousRankTimeoutCertificate.Rank+1 and State.ParentQuorumCertificate.GetRank() >= State.Rank
// in this case the state is not safe to extend since the proposal includes a QC which is newer than the proposal itself.
proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
helper.WithParentState(s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2),
func(state *models.State[*helper.TestState]) {
state.ParentQuorumCertificate = helper.MakeQC(helper.WithQCRank(s.bootstrapState.Rank + 10))
})),
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](
helper.MakeTC(
helper.WithTCRank(s.bootstrapState.Rank+1))))))
vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
})
s.Run("last-rank-tc-invalid-highest-qc", func() {
// create a state where State.Rank != State.ParentQuorumCertificate.GetRank()+1 and
// State.Rank == PreviousRankTimeoutCertificate.Rank+1 and State.ParentQuorumCertificate.GetRank() < PreviousRankTimeoutCertificate.NewestQC.Rank
// in this case the state is not safe to extend since the proposal is built on top of a QC which is lower
// than the QC presented in the PreviousRankTimeoutCertificate.
TONewestQC := helper.MakeQC(helper.WithQCRank(s.bootstrapState.Rank + 1))
proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
helper.WithParentState(s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+2))),
helper.WithPreviousRankTimeoutCertificate[*helper.TestState](
helper.MakeTC(
helper.WithTCRank(s.bootstrapState.Rank+1),
helper.WithTCNewestQC(TONewestQC))))))
vote, err := s.safety.ProduceVote(proposal, proposal.State.Rank)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
})

s.signer.AssertNotCalled(s.T(), "CreateVote")
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceVote_VoteEquivocation tests the scenario where we try to vote twice in the same rank. We require that the replica
// follows these rules:
// - replica votes once per rank
// - replica votes in monotonically increasing ranks
//
// Voting twice per round on equivocating proposals is considered byzantine behavior.
// Expect a `models.NoVoteError` sentinel in such a scenario.
func (s *SafetyRulesTestSuite) TestProduceVote_VoteEquivocation() {
expectedVote := makeVote(s.proposal.State)
s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", mock.Anything).Return(nil).Once()

vote, err := s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.NoError(s.T(), err)
require.NotNil(s.T(), vote)
require.Equal(s.T(), &expectedVote, vote)

equivocatingProposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(
helper.WithState(
helper.MakeState(
helper.WithParentState(s.bootstrapState),
helper.WithStateRank[*helper.TestState](s.bootstrapState.Rank+1),
helper.WithStateProposer[*helper.TestState](s.proposerIdentity)),
))))

// voting at the same rank (even on a different proposal) should result in NoVoteError
vote, err = s.safety.ProduceVote(equivocatingProposal, s.proposal.State.Rank)
require.True(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)

s.proposal.State.ProposerID = s.ourIdentity

// proposing at the same rank should result in NoVoteError since we have already voted
vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal)
require.True(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
}
// TestProduceVote_AfterTimeout tests a scenario where we first time out for a rank and then try to produce a vote for the
// same rank. This should result in an error, since producing a timeout means that we have given up on this rank
// and are in the process of moving forward; no vote should be created.
func (s *SafetyRulesTestSuite) TestProduceVote_AfterTimeout() {
rank := s.proposal.State.Rank
newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
expectedTimeout := &models.TimeoutState[*helper.TestVote]{
Rank: rank,
LatestQuorumCertificate: newestQC,
}
s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once()
s.persister.On("PutConsensusState", mock.Anything).Return(nil).Once()

// first timeout, then try to vote
timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.NoError(s.T(), err)
require.NotNil(s.T(), timeout)

// voting in the same rank after producing a timeout is not allowed
vote, err := s.safety.ProduceVote(s.proposal, rank)
require.True(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)

s.signer.AssertExpectations(s.T())
s.persister.AssertExpectations(s.T())
}
// TestProduceTimeout_ShouldTimeout tests that we can produce a timeout regardless of whether the
// last rank was successful. Also tests last-timeout caching.
func (s *SafetyRulesTestSuite) TestProduceTimeout_ShouldTimeout() {
rank := s.proposal.State.Rank
newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
expectedTimeout := &models.TimeoutState[*helper.TestVote]{
Rank: rank,
LatestQuorumCertificate: newestQC,
// don't care about actual data
Vote: helper.MakeVote[*helper.TestVote](),
}

expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.safetyData.FinalizedRank,
LatestAcknowledgedRank: rank,
LatestTimeout: expectedTimeout,
}
s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()
timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.NoError(s.T(), err)
require.Equal(s.T(), expectedTimeout, timeout)

s.persister.AssertCalled(s.T(), "PutConsensusState", expectedSafetyData)

s.persister.On("PutConsensusState", mock.MatchedBy(func(s *models.ConsensusState[*helper.TestVote]) bool {
return s.LatestTimeout.TimeoutTick == 1
})).Return(nil).Once()

otherTimeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.NoError(s.T(), err)
require.True(s.T(), timeout.Equals(otherTimeout))
require.Equal(s.T(), timeout.TimeoutTick+1, otherTimeout.TimeoutTick)

// to create a new timeout we need to provide a TC
previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(rank),
helper.WithTCNewestQC(newestQC))

expectedTimeout = &models.TimeoutState[*helper.TestVote]{
Rank: rank + 1,
LatestQuorumCertificate: newestQC,
PriorRankTimeoutCertificate: previousRankTimeoutCert,
}
s.signer.On("CreateTimeout", rank+1, newestQC, previousRankTimeoutCert).Return(expectedTimeout, nil).Once()
expectedSafetyData = &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.safetyData.FinalizedRank,
LatestAcknowledgedRank: rank + 1,
LatestTimeout: expectedTimeout,
}
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()

// creating a new timeout should invalidate the cache
otherTimeout, err = s.safety.ProduceTimeout(rank+1, newestQC, previousRankTimeoutCert)
require.NoError(s.T(), err)
require.NotNil(s.T(), otherTimeout)
}
// TestProduceTimeout_NotSafeToTimeout tests that we don't produce a timeout when it's not safe.
// We expect the EventHandler to only request timeouts for the current rank, providing a valid set of inputs.
// Hence, the cases tested here would be symptoms of an internal bug, and therefore should not result in a NoVoteError.
func (s *SafetyRulesTestSuite) TestProduceTimeout_NotSafeToTimeout() {

s.Run("newest-qc-nil", func() {
// newestQC cannot be nil
timeout, err := s.safety.ProduceTimeout(s.safetyData.FinalizedRank, nil, nil)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
// if a QC for the previous rank is provided, a last rank TC is unnecessary and must not be provided
s.Run("includes-last-rank-qc-and-tc", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))

// tc not needed but included
timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+1, newestQC, helper.MakeTC())
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("last-rank-tc-nil", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))

// tc needed but not included
timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+2, newestQC, nil)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("last-rank-tc-for-wrong-rank", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
// previousRankTimeoutCert should be for newestQC.GetRank()+1
previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(newestQC.GetRank()))

timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+2, newestQC, previousRankTimeoutCert)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("cur-rank-equal-to-highest-QC", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(s.safetyData.FinalizedRank - 1))

timeout, err := s.safety.ProduceTimeout(s.safetyData.FinalizedRank, newestQC, previousRankTimeoutCert)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("cur-rank-below-highest-QC", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(newestQC.GetRank() - 2))

timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()-1, newestQC, previousRankTimeoutCert)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("last-rank-tc-is-newer", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
// the newest QC included in the TC cannot be higher than the newest QC known to the replica
previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(newestQC.GetRank()+1),
helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(newestQC.GetRank()+1))))

timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+2, newestQC, previousRankTimeoutCert)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("highest-qc-below-locked-round", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank - 1))

timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+1, newestQC, nil)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})
s.Run("cur-rank-below-highest-acknowledged-rank", func() {
newestQC := helper.MakeQC(helper.WithQCRank(s.safetyData.FinalizedRank))
// modify the highest acknowledged rank so that it is definitely bigger than the newest QC rank
s.safetyData.LatestAcknowledgedRank = newestQC.GetRank() + 10

timeout, err := s.safety.ProduceTimeout(newestQC.GetRank()+1, newestQC, nil)
require.Error(s.T(), err)
require.Nil(s.T(), timeout)
})

s.signer.AssertNotCalled(s.T(), "CreateTimeout")
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceTimeout_CreateTimeoutException tests that no timeout is created if timeout creation raised an exception
func (s *SafetyRulesTestSuite) TestProduceTimeout_CreateTimeoutException() {
rank := s.proposal.State.Rank
newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))

exception := errors.New("create-timeout-exception")
s.signer.On("CreateTimeout", rank, newestQC, nil).Return(nil, exception).Once()
vote, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.Nil(s.T(), vote)
require.ErrorIs(s.T(), err, exception)
require.False(s.T(), models.IsNoVoteError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceTimeout_PersistStateException tests that no timeout is created if persisting state failed
func (s *SafetyRulesTestSuite) TestProduceTimeout_PersistStateException() {
exception := errors.New("persister-exception")
s.persister.On("PutConsensusState", mock.Anything).Return(exception)

rank := s.proposal.State.Rank
newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
expectedTimeout := &models.TimeoutState[*helper.TestVote]{
Rank: rank,
LatestQuorumCertificate: newestQC,
}

s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once()
timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.Nil(s.T(), timeout)
require.ErrorIs(s.T(), err, exception)
}
// TestProduceTimeout_AfterVote tests a case where we first produce a vote and then try to time out
// for the same rank. This behavior is expected and should result in a valid timeout without any errors.
func (s *SafetyRulesTestSuite) TestProduceTimeout_AfterVote() {
expectedVote := makeVote(s.proposal.State)
s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", mock.Anything).Return(nil).Times(2)

rank := s.proposal.State.Rank

// first produce a vote, then try to time out
vote, err := s.safety.ProduceVote(s.proposal, rank)
require.NoError(s.T(), err)
require.NotNil(s.T(), vote)

newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))

expectedTimeout := &models.TimeoutState[*helper.TestVote]{
Rank: rank,
LatestQuorumCertificate: newestQC,
}

s.signer.On("CreateTimeout", rank, newestQC, nil).Return(expectedTimeout, nil).Once()

// timing out for the same rank should be possible
timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.NoError(s.T(), err)
require.NotNil(s.T(), timeout)

s.persister.AssertExpectations(s.T())
s.signer.AssertExpectations(s.T())
}
// TestProduceTimeout_InvalidProposerIdentity tests that no timeout is created if there was an exception retrieving the proposer identity.
// We are specifically testing that unexpected errors are handled correctly, i.e.
// that SafetyRules does not erroneously wrap unexpected exceptions into the expected models.NoTimeoutError.
func (s *SafetyRulesTestSuite) TestProduceTimeout_InvalidProposerIdentity() {
rank := s.proposal.State.Rank
newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
*s.committee = mocks.DynamicCommittee{}
exception := errors.New("invalid-signer-identity")
s.committee.On("IdentityByRank", rank, s.ourIdentity).Return(nil, exception).Once()
s.committee.On("Self").Return(s.ourIdentity)

timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.Nil(s.T(), timeout)
require.ErrorIs(s.T(), err, exception)
require.False(s.T(), models.IsNoTimeoutError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestProduceTimeout_NodeEjected tests that no timeout is created if the replica is not authorized to create a timeout.
// Nodes have zero weight in the grace periods around the epochs where they are authorized to participate.
// We don't want zero-weight nodes to participate in the first place, to avoid unnecessary traffic.
// Note: this also covers ejected nodes. In both cases, the committee will return an `InvalidSignerError`.
func (s *SafetyRulesTestSuite) TestProduceTimeout_NodeEjected() {
rank := s.proposal.State.Rank
newestQC := helper.MakeQC(helper.WithQCRank(rank - 1))
*s.committee = mocks.DynamicCommittee{}
s.committee.On("Self").Return(s.ourIdentity)
s.committee.On("IdentityByRank", rank, s.ourIdentity).Return(nil, models.NewInvalidSignerErrorf("")).Maybe()

timeout, err := s.safety.ProduceTimeout(rank, newestQC, nil)
require.Nil(s.T(), timeout)
require.True(s.T(), models.IsNoTimeoutError(err))
s.persister.AssertNotCalled(s.T(), "PutConsensusState")
}
// TestSignOwnProposal tests a happy path scenario where the leader can sign their own proposal.
func (s *SafetyRulesTestSuite) TestSignOwnProposal() {
s.proposal.State.ProposerID = s.ourIdentity
expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(),
LatestAcknowledgedRank: s.proposal.State.Rank,
}
expectedVote := makeVote(s.proposal.State)
s.committee.On("LeaderForRank").Unset()
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.ourIdentity, nil).Once()
s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()
vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal)
require.NoError(s.T(), err)
require.Equal(s.T(), vote, &expectedVote)
}
// TestSignOwnProposal_ProposalNotSelf tests that we cannot sign a proposal that is not ours. We
// verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError.
func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalNotSelf() {
vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
}
// TestSignOwnProposal_SelfInvalidLeader tests that we cannot sign a proposal if we are not the leader for the rank.
// We verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError.
func (s *SafetyRulesTestSuite) TestSignOwnProposal_SelfInvalidLeader() {
s.proposal.State.ProposerID = s.ourIdentity
otherID := helper.MakeIdentity()
require.NotEqual(s.T(), otherID, s.ourIdentity)
s.committee.On("LeaderForRank").Unset()
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(otherID, nil).Once()
vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal)
require.Error(s.T(), err)
require.False(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
}
// TestSignOwnProposal_ProposalEquivocation verifies that SafetyRules will refuse to sign multiple proposals for the same rank.
// We require that the leader complies with the following rules:
// - leader proposes once per rank
// - leader's proposals follow safety rules
//
// Signing repeatedly for one rank (either proposing or voting) can lead to equivocation (byzantine behavior).
// Expect a `models.NoVoteError` sentinel in such a scenario.
func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalEquivocation() {
s.proposal.State.ProposerID = s.ourIdentity
expectedSafetyData := &models.ConsensusState[*helper.TestVote]{
FinalizedRank: s.proposal.State.ParentQuorumCertificate.GetRank(),
LatestAcknowledgedRank: s.proposal.State.Rank,
}
expectedVote := makeVote(s.proposal.State)
s.committee.On("LeaderForRank").Unset()
s.committee.On("LeaderForRank", s.proposal.State.Rank).Return(s.ourIdentity, nil).Once()
s.signer.On("CreateVote", s.proposal.State).Return(&expectedVote, nil).Once()
s.persister.On("PutConsensusState", expectedSafetyData).Return(nil).Once()

vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal)
require.NoError(s.T(), err)
require.Equal(s.T(), &expectedVote, vote)

// signing the same proposal again should return an error since we have already created a proposal for this rank
vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal)
require.Error(s.T(), err)
require.True(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)

// voting for the same rank should also return an error since we have already proposed
vote, err = s.safety.ProduceVote(s.proposal, s.proposal.State.Rank)
require.Error(s.T(), err)
require.True(s.T(), models.IsNoVoteError(err))
require.Nil(s.T(), vote)
}
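// makeVote builds a minimal TestVote carrying the given state's identifier
// and rank; the vote ID is randomized per call.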
func makeVote(state *models.State[*helper.TestState]) *helper.TestVote {
return &helper.TestVote{
StateID: state.Identifier,
Rank: state.Rank,
ID: helper.MakeIdentity(),
}
}
@ -29,7 +29,7 @@ var _ consensus.StateSignerDecoder[*nilUnique] = (*StateSignerDecoder[*nilUnique
// validity of parent state. Consequently, the returned IdentifierList contains
// the consensus participants that signed the parent state. Expected Error
// returns during normal operations:
// - signature.InvalidSignerIndicesError if signer indices included in the
// - consensus.InvalidSignerIndicesError if signer indices included in the
// state do not encode a valid subset of the consensus committee
// - state.ErrUnknownSnapshotReference if the input state is not a known
// incorporated state.
@ -17,11 +17,11 @@ type signerInfo struct {
}

// WeightedSignatureAggregator implements consensus.WeightedSignatureAggregator.
// It is a wrapper around signature.SignatureAggregatorSameMessage, which
// It is a wrapper around consensus.SignatureAggregatorSameMessage, which
// implements a mapping from node IDs (as used by HotStuff) to index-based
// addressing of authorized signers (as used by SignatureAggregatorSameMessage).
//
// Similarly to module/signature.SignatureAggregatorSameMessage, this module
// Similarly to module/consensus.SignatureAggregatorSameMessage, this module
// assumes proofs of possession (PoP) of all identity public keys are valid.
type WeightedSignatureAggregator struct {
aggregator consensus.SignatureAggregator
@ -166,7 +166,7 @@ func (w *WeightedSignatureAggregator) TotalWeight() uint64 {
return w.totalWeight
}

// Aggregate aggregates the signatures and returns the aggregated signature.
// Aggregate aggregates the signatures and returns the aggregated consensus.
// The function performs a final verification and errors if the aggregated
// signature is invalid. This is required for the function safety since
// `TrustedAdd` allows adding invalid signatures. The function errors with:
File diff suppressed because it is too large.

133 consensus/timeoutaggregator/timeout_aggregator_test.go Normal file
@ -0,0 +1,133 @@
package timeoutaggregator

import (
    "context"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "go.uber.org/atomic"

    "source.quilibrium.com/quilibrium/monorepo/consensus/helper"
    "source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
    "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestTimeoutAggregator(t *testing.T) {
    suite.Run(t, new(TimeoutAggregatorTestSuite))
}

// TimeoutAggregatorTestSuite is a test suite for isolated testing of TimeoutAggregator.
// It contains mocked state which is used to verify correct behavior of TimeoutAggregator.
// It automatically starts and stops module.Startable in SetupTest and TearDownTest, respectively.
type TimeoutAggregatorTestSuite struct {
    suite.Suite

    lowestRetainedRank uint64
    highestKnownRank   uint64
    aggregator         *TimeoutAggregator[*helper.TestVote]
    collectors         *mocks.TimeoutCollectors[*helper.TestVote]
    stopAggregator     context.CancelFunc
}

func (s *TimeoutAggregatorTestSuite) SetupTest() {
    var err error
    s.collectors = mocks.NewTimeoutCollectors[*helper.TestVote](s.T())

    s.lowestRetainedRank = 100

    s.aggregator, err = NewTimeoutAggregator(
        helper.Logger(),
        s.lowestRetainedRank,
        s.collectors,
    )
    require.NoError(s.T(), err)

    ctx, cancel := context.WithCancel(context.Background())
    signalerCtx := ctx
    s.stopAggregator = cancel
    s.aggregator.Start(signalerCtx)
}

func (s *TimeoutAggregatorTestSuite) TearDownTest() {
    s.stopAggregator()
}

// TestAddTimeout_HappyPath tests the happy path where multiple threads add timeouts for processing.
// Eventually every timeout has to be processed by the TimeoutCollector.
func (s *TimeoutAggregatorTestSuite) TestAddTimeout_HappyPath() {
    timeoutsCount := 20
    collector := mocks.NewTimeoutCollector[*helper.TestVote](s.T())
    callCount := atomic.NewUint64(0)
    collector.On("AddTimeout", mock.Anything).Run(func(mock.Arguments) {
        callCount.Add(1)
    }).Return(nil).Times(timeoutsCount)
    s.collectors.On("GetOrCreateCollector", s.lowestRetainedRank).Return(collector, true, nil).Times(timeoutsCount)

    var start sync.WaitGroup
    start.Add(timeoutsCount)
    for i := 0; i < timeoutsCount; i++ {
        go func() {
            timeout := helper.TimeoutStateFixture[*helper.TestVote](helper.WithTimeoutStateRank[*helper.TestVote](s.lowestRetainedRank))

            start.Done()
            // Wait for the last worker routine to signal ready. Then,
            // feed all timeouts into the cache.
            start.Wait()

            s.aggregator.AddTimeout(timeout)
        }()
    }

    start.Wait()

    require.Eventually(s.T(), func() bool {
        return callCount.Load() == uint64(timeoutsCount)
    }, time.Second, time.Millisecond*20)
}
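
The test above uses a WaitGroup as a start barrier: every goroutine builds its fixture, signals ready, and then waits for all the others, so the submissions hit the aggregator at roughly the same instant. A minimal sketch of just that barrier idiom, decoupled from the aggregator:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    const workers = 20
    var calls atomic.Uint64

    // start acts as a barrier: every goroutine signals ready, then all
    // proceed together to maximize contention on the shared component.
    var start, done sync.WaitGroup
    start.Add(workers)
    done.Add(workers)
    for i := 0; i < workers; i++ {
        go func() {
            defer done.Done()
            start.Done() // signal ready
            start.Wait() // wait for everyone else
            calls.Add(1) // stand-in for aggregator.AddTimeout(...)
        }()
    }
    done.Wait()
    fmt.Println(calls.Load() == workers) // true
}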

// TestAddTimeout_EpochUnknown tests that timeout states targeting an unknown epoch are ignored.
func (s *TimeoutAggregatorTestSuite) TestAddTimeout_EpochUnknown() {
    timeout := helper.TimeoutStateFixture(helper.WithTimeoutStateRank[*helper.TestVote](s.lowestRetainedRank))
    *s.collectors = *mocks.NewTimeoutCollectors[*helper.TestVote](s.T())
    done := make(chan struct{})
    s.collectors.On("GetOrCreateCollector", timeout.Rank).Return(nil, false, models.ErrRankUnknown).Run(func(args mock.Arguments) {
        close(done)
    }).Once()
    s.aggregator.AddTimeout(timeout)
    time.Sleep(100 * time.Millisecond)
}

// TestPruneUpToRank tests that pruning removes collectors below the retained rank.
func (s *TimeoutAggregatorTestSuite) TestPruneUpToRank() {
    s.collectors.On("PruneUpToRank", s.lowestRetainedRank+1).Once()
    s.aggregator.PruneUpToRank(s.lowestRetainedRank + 1)
}

// TestOnQuorumCertificateTriggeredRankChange tests that a rank-change event is processed when sent through `TimeoutAggregator`.
// Tests the whole processing pipeline.
func (s *TimeoutAggregatorTestSuite) TestOnQuorumCertificateTriggeredRankChange() {
    done := make(chan struct{})
    s.collectors.On("PruneUpToRank", s.lowestRetainedRank+1).Run(func(args mock.Arguments) {
        close(done)
    }).Once()
    qc := helper.MakeQC(helper.WithQCRank(s.lowestRetainedRank))
    s.aggregator.OnRankChange(qc.GetRank(), qc.GetRank()+1)
    time.Sleep(100 * time.Millisecond)
}

// TestOnTimeoutCertificateTriggeredRankChange tests that a rank-change event is processed when sent through `TimeoutAggregator`.
// Tests the whole processing pipeline.
func (s *TimeoutAggregatorTestSuite) TestOnTimeoutCertificateTriggeredRankChange() {
    rank := s.lowestRetainedRank + 1
    done := make(chan struct{})
    s.collectors.On("PruneUpToRank", rank).Run(func(args mock.Arguments) {
        close(done)
    }).Once()
    tc := helper.MakeTC(helper.WithTCRank(s.lowestRetainedRank))
    s.aggregator.OnRankChange(tc.GetRank(), tc.GetRank()+1)
    time.Sleep(100 * time.Millisecond)
}
176 consensus/timeoutaggregator/timeout_collectors_test.go Normal file
@ -0,0 +1,176 @@
package timeoutaggregator

import (
    "errors"
    "fmt"
    "sync"
    "testing"

    "github.com/gammazero/workerpool"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "go.uber.org/atomic"

    "source.quilibrium.com/quilibrium/monorepo/consensus"
    "source.quilibrium.com/quilibrium/monorepo/consensus/helper"
    "source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
    "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

var factoryError = errors.New("factory error")

func TestTimeoutCollectors(t *testing.T) {
    suite.Run(t, new(TimeoutCollectorsTestSuite))
}

// TimeoutCollectorsTestSuite is a test suite for isolated testing of TimeoutCollectors.
// It contains helper methods and mocked state which is used to verify correct behavior of TimeoutCollectors.
type TimeoutCollectorsTestSuite struct {
    suite.Suite

    mockedCollectors map[uint64]*mocks.TimeoutCollector[*helper.TestVote]
    factoryMethod    *mocks.TimeoutCollectorFactory[*helper.TestVote]
    collectors       *TimeoutCollectors[*helper.TestVote]
    lowestRank       uint64
    workerPool       *workerpool.WorkerPool
}

func (s *TimeoutCollectorsTestSuite) SetupTest() {
    s.lowestRank = 1000
    s.mockedCollectors = make(map[uint64]*mocks.TimeoutCollector[*helper.TestVote])
    s.workerPool = workerpool.New(2)
    s.factoryMethod = mocks.NewTimeoutCollectorFactory[*helper.TestVote](s.T())
    s.factoryMethod.On("Create", mock.Anything).Return(func(rank uint64) consensus.TimeoutCollector[*helper.TestVote] {
        if collector, found := s.mockedCollectors[rank]; found {
            return collector
        }
        return nil
    }, func(rank uint64) error {
        if _, found := s.mockedCollectors[rank]; found {
            return nil
        }
        return fmt.Errorf("mocked collector %v not found: %w", rank, factoryError)
    }).Maybe()
    s.collectors = NewTimeoutCollectors(helper.Logger(), s.lowestRank, s.factoryMethod)
}

func (s *TimeoutCollectorsTestSuite) TearDownTest() {
    s.workerPool.StopWait()
}

// prepareMockedCollector prepares a mocked collector and stores it in a map; it is later used
// to mock the behavior of timeout collectors.
func (s *TimeoutCollectorsTestSuite) prepareMockedCollector(rank uint64) *mocks.TimeoutCollector[*helper.TestVote] {
    collector := mocks.NewTimeoutCollector[*helper.TestVote](s.T())
    collector.On("Rank").Return(rank).Maybe()
    s.mockedCollectors[rank] = collector
    return collector
}

// TestGetOrCreateCollector_RankLowerThanLowest tests a scenario where the caller tries to create a collector with a rank
// lower than an already pruned one. This should result in the sentinel error `BelowPrunedThresholdError`.
func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_RankLowerThanLowest() {
    collector, created, err := s.collectors.GetOrCreateCollector(s.lowestRank - 10)
    require.Nil(s.T(), collector)
    require.False(s.T(), created)
    require.Error(s.T(), err)
    require.True(s.T(), models.IsBelowPrunedThresholdError(err))
}

// TestGetOrCreateCollector_UnknownEpoch tests a scenario where the caller tries to create a collector whose rank refers
// to an epoch that we don't know about. This should result in the sentinel error `models.ErrRankUnknown`.
func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_UnknownEpoch() {
    *s.factoryMethod = *mocks.NewTimeoutCollectorFactory[*helper.TestVote](s.T())
    s.factoryMethod.On("Create", mock.Anything).Return(nil, models.ErrRankUnknown)
    collector, created, err := s.collectors.GetOrCreateCollector(s.lowestRank + 100)
    require.Nil(s.T(), collector)
    require.False(s.T(), created)
    require.ErrorIs(s.T(), err, models.ErrRankUnknown)
}

// TestGetOrCreateCollector_ValidCollector tests the happy path scenario where we first create and then retrieve a cached collector.
func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_ValidCollector() {
    rank := s.lowestRank + 10
    s.prepareMockedCollector(rank)
    collector, created, err := s.collectors.GetOrCreateCollector(rank)
    require.NoError(s.T(), err)
    require.True(s.T(), created)
    require.Equal(s.T(), rank, collector.Rank())

    cached, cachedCreated, err := s.collectors.GetOrCreateCollector(rank)
    require.NoError(s.T(), err)
    require.False(s.T(), cachedCreated)
    require.Equal(s.T(), collector, cached)
}

// TestGetOrCreateCollector_FactoryError tests that an error from the factory method is propagated to the caller.
func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollector_FactoryError() {
    // creating a collector without calling prepareMockedCollector will yield factoryError.
    collector, created, err := s.collectors.GetOrCreateCollector(s.lowestRank + 10)
    require.Nil(s.T(), collector)
    require.False(s.T(), created)
    require.ErrorIs(s.T(), err, factoryError)
}

// TestGetOrCreateCollectors_ConcurrentAccess tests that concurrently accessing GetOrCreateCollector creates
// only one collector, and all other instances are retrieved from the cache.
func (s *TimeoutCollectorsTestSuite) TestGetOrCreateCollectors_ConcurrentAccess() {
    createdTimes := atomic.NewUint64(0)
    rank := s.lowestRank + 10
    s.prepareMockedCollector(rank)
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _, created, err := s.collectors.GetOrCreateCollector(rank)
            require.NoError(s.T(), err)
            if created {
                createdTimes.Add(1)
            }
        }()
    }
    wg.Wait()

    require.Equal(s.T(), uint64(1), createdTimes.Load())
}
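
The create-once guarantee this test pins down is commonly implemented with a double-checked lookup under an RWMutex. The actual TimeoutCollectors implementation is not shown in this diff; the following is a minimal sketch of one way to obtain the `(value, created)` behavior the test asserts.

package main

import (
    "fmt"
    "sync"
)

// registry sketches a create-once-per-rank map guarded by an RWMutex;
// the real TimeoutCollectors may differ in detail.
type registry struct {
    mu    sync.RWMutex
    items map[uint64]string
}

// getOrCreate returns (item, created): the factory runs at most once per
// rank, and all concurrent callers observe the same cached value.
func (r *registry) getOrCreate(rank uint64, factory func() string) (string, bool) {
    r.mu.RLock()
    if it, ok := r.items[rank]; ok {
        r.mu.RUnlock()
        return it, false
    }
    r.mu.RUnlock()

    r.mu.Lock()
    defer r.mu.Unlock()
    // re-check under the write lock: another goroutine may have won the race
    if it, ok := r.items[rank]; ok {
        return it, false
    }
    it := factory()
    r.items[rank] = it
    return it, true
}

func main() {
    r := &registry{items: map[uint64]string{}}
    _, created := r.getOrCreate(1010, func() string { return "collector" })
    fmt.Println(created) // true
    _, created = r.getOrCreate(1010, func() string { return "collector" })
    fmt.Println(created) // false: retrieved from cache
}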

// TestPruneUpToRank tests that pruning removes items below the pruning height and leaves other items unmodified.
func (s *TimeoutCollectorsTestSuite) TestPruneUpToRank() {
    numberOfCollectors := uint64(10)
    prunedRanks := make([]uint64, 0)
    for i := uint64(0); i < numberOfCollectors; i++ {
        rank := s.lowestRank + i
        s.prepareMockedCollector(rank)
        _, _, err := s.collectors.GetOrCreateCollector(rank)
        require.NoError(s.T(), err)
        prunedRanks = append(prunedRanks, rank)
    }

    pruningHeight := s.lowestRank + numberOfCollectors

    expectedCollectors := make([]consensus.TimeoutCollector[*helper.TestVote], 0)
    for i := uint64(0); i < numberOfCollectors; i++ {
        rank := pruningHeight + i
        s.prepareMockedCollector(rank)
        collector, _, err := s.collectors.GetOrCreateCollector(rank)
        require.NoError(s.T(), err)
        expectedCollectors = append(expectedCollectors, collector)
    }

    // after this operation, collectors below the pruning height should be pruned
    // and everything higher should be left unmodified
    s.collectors.PruneUpToRank(pruningHeight)

    for _, prunedRank := range prunedRanks {
        _, _, err := s.collectors.GetOrCreateCollector(prunedRank)
        require.Error(s.T(), err)
        require.True(s.T(), models.IsBelowPrunedThresholdError(err))
    }

    for _, collector := range expectedCollectors {
        cached, _, _ := s.collectors.GetOrCreateCollector(collector.Rank())
        require.Equal(s.T(), collector, cached)
    }
}

@ -177,7 +177,7 @@ func (a *TimeoutSignatureAggregator) Rank() uint64 {
    return a.rank
}

// Aggregate aggregates the signatures and returns the aggregated signature.
// Aggregate aggregates the signatures and returns the aggregated consensus.
// The resulting aggregated signature is guaranteed to be valid, as all
// individual signatures are pre-validated before their addition. Expected
// errors during normal operations:

@ -64,6 +64,7 @@ type TimeoutProcessorFactory[
    committee           consensus.Replicas
    notifier            consensus.TimeoutCollectorConsumer[VoteT]
    validator           consensus.Validator[StateT, VoteT]
    voting              consensus.VotingProvider[StateT, VoteT, PeerIDT]
    domainSeparationTag []byte
}

@ -127,6 +128,7 @@ func (f *TimeoutProcessorFactory[StateT, VoteT, PeerIDT]) Create(rank uint64) (
        f.validator,
        sigAggregator,
        f.notifier,
        f.voting,
    )
}

172 consensus/timeoutcollector/timeout_cache_test.go Normal file
@ -0,0 +1,172 @@
package timeoutcollector

import (
    "fmt"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "source.quilibrium.com/quilibrium/monorepo/consensus/helper"
    "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

// TestTimeoutStatesCache_Rank tests that Rank returns the same value that was set by the constructor.
func TestTimeoutStatesCache_Rank(t *testing.T) {
    rank := uint64(100)
    cache := NewTimeoutStatesCache[*helper.TestVote](rank)
    require.Equal(t, rank, cache.Rank())
}

// TestTimeoutStatesCache_AddTimeoutStateRepeatedTimeout tests that AddTimeoutState skips duplicated timeouts.
func TestTimeoutStatesCache_AddTimeoutStateRepeatedTimeout(t *testing.T) {
    t.Parallel()

    rank := uint64(100)
    cache := NewTimeoutStatesCache[*helper.TestVote](rank)
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](rank),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   "1",
            Rank: rank,
        }),
    )

    require.NoError(t, cache.AddTimeoutState(timeout))
    err := cache.AddTimeoutState(timeout)
    require.ErrorIs(t, err, ErrRepeatedTimeout)
    require.Len(t, cache.All(), 1)
}

// TestTimeoutStatesCache_AddTimeoutStateIncompatibleRank tests that adding a timeout with an incompatible rank results in an error.
func TestTimeoutStatesCache_AddTimeoutStateIncompatibleRank(t *testing.T) {
    t.Parallel()

    rank := uint64(100)
    cache := NewTimeoutStatesCache[*helper.TestVote](rank)
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](rank+1),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   "1",
            Rank: rank,
        }),
    )
    err := cache.AddTimeoutState(timeout)
    require.ErrorIs(t, err, ErrTimeoutForIncompatibleRank)
}

// TestTimeoutStatesCache_GetTimeout tests that the GetTimeout method returns the first added timeout
// for a given signer, if any timeout has been added.
func TestTimeoutStatesCache_GetTimeout(t *testing.T) {
    rank := uint64(100)
    knownTimeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](rank),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   "1",
            Rank: rank,
        }),
    )
    doubleTimeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](rank),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   "1",
            Rank: rank,
        }),
    )

    cache := NewTimeoutStatesCache[*helper.TestVote](rank)

    // unknown timeout
    timeout, found := cache.GetTimeoutState(helper.MakeIdentity())
    require.Nil(t, timeout)
    require.False(t, found)

    // known timeout
    err := cache.AddTimeoutState(knownTimeout)
    require.NoError(t, err)
    timeout, found = cache.GetTimeoutState((*knownTimeout.Vote).ID)
    require.Equal(t, knownTimeout, timeout)
    require.True(t, found)

    // for a signer ID with a known timeout, the cache should memorize the _first_ encountered timeout
    err = cache.AddTimeoutState(doubleTimeout)
    require.True(t, models.IsDoubleTimeoutError[*helper.TestVote](err))
    timeout, found = cache.GetTimeoutState((*doubleTimeout.Vote).ID)
    require.Equal(t, knownTimeout, timeout)
    require.True(t, found)
}
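
The test above fixes the cache's first-write-wins semantics: the first timeout per signer is memorized, and a later conflicting one yields a double-timeout error while the stored entry stays unchanged. A standalone sketch of that contract, with illustrative names rather than the actual TimeoutStatesCache layout:

package main

import (
    "errors"
    "fmt"
)

// firstWriteCache sketches the first-write-wins semantics asserted above.
// Field names are illustrative, not the actual cache internals.
type firstWriteCache struct {
    bySigner map[string]string // signer ID -> first observed timeout ID
}

var errDoubleTimeout = errors.New("double timeout")

func (c *firstWriteCache) add(signer, timeoutID string) error {
    if prev, ok := c.bySigner[signer]; ok {
        if prev == timeoutID {
            return errors.New("repeated timeout") // exact duplicate
        }
        return errDoubleTimeout // equivocation: different timeout, same signer
    }
    c.bySigner[signer] = timeoutID
    return nil
}

func main() {
    c := &firstWriteCache{bySigner: map[string]string{}}
    fmt.Println(c.add("signer-1", "to-A")) // <nil>
    fmt.Println(c.add("signer-1", "to-B")) // double timeout
    fmt.Println(c.bySigner["signer-1"])    // to-A: first write wins
}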

// TestTimeoutStatesCache_All tests that All returns previously added timeouts.
func TestTimeoutStatesCache_All(t *testing.T) {
    t.Parallel()

    rank := uint64(100)
    cache := NewTimeoutStatesCache[*helper.TestVote](rank)
    expectedTimeouts := make([]*models.TimeoutState[*helper.TestVote], 5)
    for i := range expectedTimeouts {
        timeout := helper.TimeoutStateFixture(
            helper.WithTimeoutStateRank[*helper.TestVote](rank),
            helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
                ID:   fmt.Sprintf("%d", i),
                Rank: rank,
            }),
        )
        expectedTimeouts[i] = timeout
        require.NoError(t, cache.AddTimeoutState(timeout))
    }
    require.ElementsMatch(t, expectedTimeouts, cache.All())
}

// BenchmarkAdd measures the time it takes to add `numberTimeouts` concurrently to the TimeoutStatesCache.
// On a MacBook with an Intel i7-7820HQ CPU @ 2.90GHz:
// adding 1 million timeouts in total, with 20 threads concurrently, took 0.48s.
func BenchmarkAdd(b *testing.B) {
    numberTimeouts := 1_000_000
    threads := 20

    // Setup: create worker routines and timeouts to feed
    rank := uint64(10)
    cache := NewTimeoutStatesCache[*helper.TestVote](rank)

    var start sync.WaitGroup
    start.Add(threads)
    var done sync.WaitGroup
    done.Add(threads)

    n := numberTimeouts / threads

    for ; threads > 0; threads-- {
        go func(i int) {
            // create timeouts and signal ready
            timeouts := make([]models.TimeoutState[*helper.TestVote], 0, n)
            for len(timeouts) < n {
                t := helper.TimeoutStateFixture(
                    helper.WithTimeoutStateRank[*helper.TestVote](rank),
                    helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
                        ID:   helper.MakeIdentity(),
                        Rank: rank,
                    }),
                )
                timeouts = append(timeouts, *t)
            }

            start.Done()

            // Wait for the last worker routine to signal ready. Then,
            // feed all timeouts into the cache.
            start.Wait()

            for _, v := range timeouts {
                err := cache.AddTimeoutState(&v)
                require.NoError(b, err)
            }
            done.Done()
        }(threads)
    }
    start.Wait()
    t1 := time.Now()
    done.Wait()
    duration := time.Since(t1)
    fmt.Printf("=> adding %d timeouts to Cache took %f seconds\n", cache.Size(), duration.Seconds())
}
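
This benchmark measures wall time manually and ignores the harness's iteration count. A single-threaded variant expressed with the standard b.N idiom, assuming the same NewTimeoutStatesCache/AddTimeoutState API as above, would let Go's benchmark harness handle iteration counts and timing; a sketch:

func BenchmarkAddSingleThreaded(b *testing.B) {
    rank := uint64(10)
    cache := NewTimeoutStatesCache[*helper.TestVote](rank)
    // pre-build fixtures so allocation cost stays out of the timed region
    timeouts := make([]*models.TimeoutState[*helper.TestVote], b.N)
    for i := range timeouts {
        timeouts[i] = helper.TimeoutStateFixture(
            helper.WithTimeoutStateRank[*helper.TestVote](rank),
            helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
                ID:   helper.MakeIdentity(),
                Rank: rank,
            }),
        )
    }
    b.ResetTimer() // exclude setup from the measurement
    for i := 0; i < b.N; i++ {
        if err := cache.AddTimeoutState(timeouts[i]); err != nil {
            b.Fatal(err)
        }
    }
}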

230 consensus/timeoutcollector/timeout_collector_test.go Normal file
@ -0,0 +1,230 @@
package timeoutcollector

import (
    "errors"
    "math/rand"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"

    "source.quilibrium.com/quilibrium/monorepo/consensus/helper"
    "source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
    "source.quilibrium.com/quilibrium/monorepo/consensus/models"
)

func TestTimeoutCollector(t *testing.T) {
    suite.Run(t, new(TimeoutCollectorTestSuite))
}

// TimeoutCollectorTestSuite is a test suite for testing TimeoutCollector. It stores mocked
// state internally for testing behavior.
type TimeoutCollectorTestSuite struct {
    suite.Suite

    rank      uint64
    notifier  *mocks.TimeoutAggregationConsumer[*helper.TestVote]
    processor *mocks.TimeoutProcessor[*helper.TestVote]
    collector *TimeoutCollector[*helper.TestVote]
}

func (s *TimeoutCollectorTestSuite) SetupTest() {
    s.rank = 1000
    s.notifier = mocks.NewTimeoutAggregationConsumer[*helper.TestVote](s.T())
    s.processor = mocks.NewTimeoutProcessor[*helper.TestVote](s.T())

    s.notifier.On("OnNewQuorumCertificateDiscovered", mock.Anything).Maybe()
    s.notifier.On("OnNewTimeoutCertificateDiscovered", mock.Anything).Maybe()

    s.collector = NewTimeoutCollector(helper.Logger(), s.rank, s.notifier, s.processor)
}

// TestRank tests that `Rank` returns the same value that was passed in the constructor.
func (s *TimeoutCollectorTestSuite) TestRank() {
    require.Equal(s.T(), s.rank, s.collector.Rank())
}

// TestAddTimeout_HappyPath tests that happy-path processing executed by multiple workers delivers the expected results;
// all operations should be successful, no errors expected.
func (s *TimeoutCollectorTestSuite) TestAddTimeout_HappyPath() {
    var wg sync.WaitGroup
    for i := 0; i < 20; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            timeout := helper.TimeoutStateFixture(
                helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
                helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
                    ID:   helper.MakeIdentity(),
                    Rank: s.rank,
                }),
            )
            s.notifier.On("OnTimeoutProcessed", timeout).Once()
            s.processor.On("Process", timeout).Return(nil).Once()
            err := s.collector.AddTimeout(timeout)
            require.NoError(s.T(), err)
        }()
    }
    // wait for all workers before checking expectations
    wg.Wait()

    s.processor.AssertExpectations(s.T())
}

// TestAddTimeout_DoubleTimeout tests that submitting two different timeouts for the same rank ends with reporting
// a double timeout to the notifier, which can be used for slashing later.
func (s *TimeoutCollectorTestSuite) TestAddTimeout_DoubleTimeout() {
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   "1",
            Rank: s.rank,
        }),
    )
    s.notifier.On("OnTimeoutProcessed", timeout).Once()
    s.processor.On("Process", timeout).Return(nil).Once()
    err := s.collector.AddTimeout(timeout)
    require.NoError(s.T(), err)

    otherTimeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   "1",
            Rank: s.rank,
        }),
    )

    s.notifier.On("OnDoubleTimeoutDetected", timeout, otherTimeout).Once()

    err = s.collector.AddTimeout(otherTimeout)
    require.NoError(s.T(), err)
    s.notifier.AssertExpectations(s.T())
    s.processor.AssertNumberOfCalls(s.T(), "Process", 1)
}
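
Together with the repeated-timeout test below, this pins down the collector's error triage: exact repeats are dropped silently, equivocation is reported to the notifier but is not an AddTimeout error, and only genuine exceptions propagate. A standalone sketch of that triage, assuming a cache that returns a repeated-timeout sentinel and a typed double-timeout error (names here are illustrative):

package main

import (
    "errors"
    "fmt"
)

var errRepeatedTimeout = errors.New("repeated timeout")

type doubleTimeoutError struct{ first, second string }

func (e doubleTimeoutError) Error() string { return "double timeout" }

// triage sketches the behavior the two tests assert: repeats are silently
// dropped, equivocation is reported but not treated as a caller error.
func triage(cacheErr error, report func(first, second string)) error {
    var dbl doubleTimeoutError
    switch {
    case cacheErr == nil:
        return nil // fresh timeout: process it
    case errors.Is(cacheErr, errRepeatedTimeout):
        return nil // duplicate: drop silently
    case errors.As(cacheErr, &dbl):
        report(dbl.first, dbl.second) // notify for potential slashing
        return nil
    default:
        return cacheErr // real exception: propagate to the caller
    }
}

func main() {
    err := triage(doubleTimeoutError{"to-A", "to-B"}, func(a, b string) {
        fmt.Println("double timeout detected:", a, b)
    })
    fmt.Println(err) // <nil>
}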

// TestAddTimeout_RepeatedTimeout checks that repeated timeouts are silently dropped without any errors.
func (s *TimeoutCollectorTestSuite) TestAddTimeout_RepeatedTimeout() {
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   helper.MakeIdentity(),
            Rank: s.rank,
        }),
    )
    s.notifier.On("OnTimeoutProcessed", timeout).Once()
    s.processor.On("Process", timeout).Return(nil).Once()
    err := s.collector.AddTimeout(timeout)
    require.NoError(s.T(), err)
    err = s.collector.AddTimeout(timeout)
    require.NoError(s.T(), err)
    s.processor.AssertNumberOfCalls(s.T(), "Process", 1)
}

// TestAddTimeout_TimeoutCacheException tests that submitting a timeout state for a rank which is not designated for this
// collector results in ErrTimeoutForIncompatibleRank.
func (s *TimeoutCollectorTestSuite) TestAddTimeout_TimeoutCacheException() {
    // an incompatible rank is an exception and not handled by the timeout collector
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank+1),
        helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
            ID:   helper.MakeIdentity(),
            Rank: s.rank + 1,
        }),
    )
    err := s.collector.AddTimeout(timeout)
    require.ErrorIs(s.T(), err, ErrTimeoutForIncompatibleRank)
    s.processor.AssertNotCalled(s.T(), "Process")
}

// TestAddTimeout_InvalidTimeout tests that sentinel errors raised while processing timeouts are correctly handled and
// reported to the notifier, while exceptions are propagated to the caller.
func (s *TimeoutCollectorTestSuite) TestAddTimeout_InvalidTimeout() {
    s.Run("invalid-timeout", func() {
        timeout := helper.TimeoutStateFixture(
            helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
            helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
                ID:   helper.MakeIdentity(),
                Rank: s.rank,
            }),
        )
        s.processor.On("Process", timeout).Return(models.NewInvalidTimeoutErrorf(timeout, "")).Once()
        s.notifier.On("OnInvalidTimeoutDetected", mock.Anything).Run(func(args mock.Arguments) {
            invalidTimeoutErr := args.Get(0).(models.InvalidTimeoutError[*helper.TestVote])
            require.Equal(s.T(), timeout, invalidTimeoutErr.Timeout)
        }).Once()
        err := s.collector.AddTimeout(timeout)
        require.NoError(s.T(), err)

        time.Sleep(100 * time.Millisecond)
        s.notifier.AssertCalled(s.T(), "OnInvalidTimeoutDetected", mock.Anything)
    })
    s.Run("process-exception", func() {
        exception := errors.New("invalid-signature")
        timeout := helper.TimeoutStateFixture(
            helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
            helper.WithTimeoutVote[*helper.TestVote](&helper.TestVote{
                ID:   helper.MakeIdentity(),
                Rank: s.rank,
            }),
        )
        s.processor.On("Process", timeout).Return(exception).Once()
        err := s.collector.AddTimeout(timeout)
        require.ErrorIs(s.T(), err, exception)
    })
}

// TestAddTimeout_TONotifications tests that TimeoutCollector in the happy path reports the newest discovered QC and TC.
func (s *TimeoutCollectorTestSuite) TestAddTimeout_TONotifications() {
    qcCount := 100
    // generate QCs with increasing rank numbers
    if s.rank < uint64(qcCount) {
        s.T().Fatal("invalid test configuration")
    }

    *s.notifier = *mocks.NewTimeoutAggregationConsumer[*helper.TestVote](s.T())

    var highestReportedQC models.QuorumCertificate
    s.notifier.On("OnNewQuorumCertificateDiscovered", mock.Anything).Run(func(args mock.Arguments) {
        qc := args.Get(0).(models.QuorumCertificate)
        if highestReportedQC == nil || highestReportedQC.GetRank() < qc.GetRank() {
            highestReportedQC = qc
        }
    })

    previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(s.rank - 1))
    s.notifier.On("OnNewTimeoutCertificateDiscovered", previousRankTimeoutCert).Once()

    timeouts := make([]*models.TimeoutState[*helper.TestVote], 0, qcCount)
    for i := 0; i < qcCount; i++ {
        qc := helper.MakeQC(helper.WithQCRank(uint64(i)))
        timeout := helper.TimeoutStateFixture(func(timeout *models.TimeoutState[*helper.TestVote]) {
            timeout.Rank = s.rank
            timeout.LatestQuorumCertificate = qc
            timeout.PriorRankTimeoutCertificate = previousRankTimeoutCert
        }, helper.WithTimeoutVote(&helper.TestVote{Rank: s.rank, ID: helper.MakeIdentity()}))
        timeouts = append(timeouts, timeout)
        s.notifier.On("OnTimeoutProcessed", timeout).Once()
        s.processor.On("Process", timeout).Return(nil).Once()
    }

    expectedHighestQC := timeouts[len(timeouts)-1].LatestQuorumCertificate

    // shuffle timeouts into random order
    rand.Shuffle(len(timeouts), func(i, j int) {
        timeouts[i], timeouts[j] = timeouts[j], timeouts[i]
    })

    var wg sync.WaitGroup
    wg.Add(len(timeouts))
    for _, timeout := range timeouts {
        go func(timeout *models.TimeoutState[*helper.TestVote]) {
            defer wg.Done()
            err := s.collector.AddTimeout(timeout)
            require.NoError(s.T(), err)
        }(timeout)
    }
    wg.Wait()

    require.Equal(s.T(), expectedHighestQC, highestReportedQC)
}
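
Reporting the newest QC across concurrent submissions, as this test checks, is typically implemented as a lock-free monotonic maximum. The real tracker used by the processor (`newestQCTracker`, seen in the hunks below) is not shown in full here; the following is a sketch of the rank-monotonic idea using a compare-and-swap loop:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// newestTracker sketches a monotonic "newest rank wins" tracker, similar in
// spirit to the newestQCTracker the processor uses; the real type differs.
type newestTracker struct {
    newestRank atomic.Uint64
}

// track records rank if it is newer than anything seen so far and reports
// whether the caller's value became the new maximum.
func (t *newestTracker) track(rank uint64) bool {
    for {
        cur := t.newestRank.Load()
        if rank <= cur {
            return false
        }
        if t.newestRank.CompareAndSwap(cur, rank) {
            return true
        }
        // lost the race, re-read and retry
    }
}

func main() {
    var tr newestTracker
    var wg sync.WaitGroup
    for r := uint64(1); r <= 100; r++ {
        wg.Add(1)
        go func(rank uint64) {
            defer wg.Done()
            tr.track(rank)
        }(r)
    }
    wg.Wait()
    fmt.Println(tr.newestRank.Load()) // 100, regardless of submission order
}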

@ -71,6 +71,7 @@ func NewTimeoutProcessor[
    validator consensus.Validator[StateT, VoteT],
    sigAggregator consensus.TimeoutSignatureAggregator,
    notifier consensus.TimeoutCollectorConsumer[VoteT],
    voting consensus.VotingProvider[StateT, VoteT, PeerIDT],
) (*TimeoutProcessor[StateT, VoteT, PeerIDT], error) {
    rank := sigAggregator.Rank()
    qcThreshold, err := committee.QuorumThresholdForRank(rank)

@ -105,6 +106,7 @@ func NewTimeoutProcessor[
        },
        sigAggregator:   sigAggregator,
        newestQCTracker: tracker.NewNewestQCTracker(),
        voting:          voting,
    }, nil
}

@ -159,7 +161,7 @@ func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) Process(
    p.newestQCTracker.Track(&timeout.LatestQuorumCertificate)

    totalWeight, err := p.sigAggregator.VerifyAndAdd(
        (*timeout.Vote).Source(),
        (*timeout.Vote).Identity(),
        (*timeout.Vote).GetSignature(),
        timeout.LatestQuorumCertificate.GetRank(),
    )

@ -309,7 +311,7 @@ func (p *TimeoutProcessor[StateT, VoteT, PeerIDT]) validateTimeout(
    // 3. If TC is included, it must be valid
    if timeout.PriorRankTimeoutCertificate != nil {
        err = p.validator.ValidateTimeoutCertificate(
            &timeout.PriorRankTimeoutCertificate,
            timeout.PriorRankTimeoutCertificate,
        )
        if err != nil {
            if models.IsInvalidTimeoutCertificateError(err) {

678 consensus/timeoutcollector/timeout_processor_test.go Normal file
@ -0,0 +1,678 @@
package timeoutcollector

import (
    "errors"
    "fmt"
    "math/rand"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "go.uber.org/atomic"

    "source.quilibrium.com/quilibrium/monorepo/consensus"
    "source.quilibrium.com/quilibrium/monorepo/consensus/helper"
    "source.quilibrium.com/quilibrium/monorepo/consensus/mocks"
    "source.quilibrium.com/quilibrium/monorepo/consensus/models"
    "source.quilibrium.com/quilibrium/monorepo/consensus/validator"
    "source.quilibrium.com/quilibrium/monorepo/consensus/verification"
    "source.quilibrium.com/quilibrium/monorepo/consensus/votecollector"
)

func TestTimeoutProcessor(t *testing.T) {
    suite.Run(t, new(TimeoutProcessorTestSuite))
}

// TimeoutProcessorTestSuite is a test suite that holds mocked state for isolated testing of TimeoutProcessor.
type TimeoutProcessorTestSuite struct {
    suite.Suite

    participants  []models.WeightedIdentity
    signer        models.WeightedIdentity
    rank          uint64
    sigWeight     uint64
    totalWeight   atomic.Uint64
    committee     *mocks.Replicas
    validator     *mocks.Validator[*helper.TestState, *helper.TestVote]
    sigAggregator *mocks.TimeoutSignatureAggregator
    notifier      *mocks.TimeoutCollectorConsumer[*helper.TestVote]
    processor     *TimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer]
    voting        *mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]
}

func (s *TimeoutProcessorTestSuite) SetupTest() {
    var err error
    s.sigWeight = 1000
    s.committee = mocks.NewReplicas(s.T())
    s.validator = mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
    s.sigAggregator = mocks.NewTimeoutSignatureAggregator(s.T())
    s.notifier = mocks.NewTimeoutCollectorConsumer[*helper.TestVote](s.T())
    s.participants = helper.WithWeightedIdentityList(11)
    s.signer = s.participants[0]
    s.rank = (uint64)(rand.Uint32() + 100)
    s.totalWeight = *atomic.NewUint64(0)
    s.voting = mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](s.T())

    s.committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(8000), nil).Maybe()
    s.committee.On("TimeoutThresholdForRank", mock.Anything).Return(uint64(8000), nil).Maybe()
    s.committee.On("IdentityByEpoch", mock.Anything, mock.Anything).Return(s.signer, nil).Maybe()
    s.sigAggregator.On("Rank").Return(s.rank).Maybe()
    s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
        s.totalWeight.Add(s.sigWeight)
    }).Return(func(signerID models.Identity, sig []byte, newestQCRank uint64) uint64 {
        return s.totalWeight.Load()
    }, func(signerID models.Identity, sig []byte, newestQCRank uint64) error {
        return nil
    }).Maybe()
    s.sigAggregator.On("TotalWeight").Return(func() uint64 {
        return s.totalWeight.Load()
    }).Maybe()

    s.processor, err = NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](
        helper.Logger(),
        s.committee,
        s.validator,
        s.sigAggregator,
        s.notifier,
        s.voting,
    )
    require.NoError(s.T(), err)
}

// TimeoutLastRankSuccessfulFixture creates a valid timeout for the case where the last rank ended with a QC.
func (s *TimeoutProcessorTestSuite) TimeoutLastRankSuccessfulFixture(opts ...func(*models.TimeoutState[*helper.TestVote])) *models.TimeoutState[*helper.TestVote] {
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
        helper.WithTimeoutNewestQC[*helper.TestVote](helper.MakeQC(helper.WithQCRank(s.rank-1))),
        helper.WithTimeoutVote(&helper.TestVote{ID: helper.MakeIdentity(), Rank: s.rank}),
        helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](nil),
    )

    for _, opt := range opts {
        opt(timeout)
    }

    return timeout
}

// TimeoutLastRankFailedFixture creates a valid timeout for the case where the last rank ended with a TC.
func (s *TimeoutProcessorTestSuite) TimeoutLastRankFailedFixture(opts ...func(*models.TimeoutState[*helper.TestVote])) *models.TimeoutState[*helper.TestVote] {
    newestQC := helper.MakeQC(helper.WithQCRank(s.rank - 10))
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
        helper.WithTimeoutNewestQC[*helper.TestVote](newestQC),
        helper.WithTimeoutVote(&helper.TestVote{ID: helper.MakeIdentity(), Rank: s.rank}),
        helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](helper.MakeTC(
            helper.WithTCRank(s.rank-1),
            helper.WithTCNewestQC(helper.MakeQC(helper.WithQCRank(newestQC.GetRank()))))),
    )

    for _, opt := range opts {
        opt(timeout)
    }

    return timeout
}

// TestProcess_TimeoutNotForRank tests that TimeoutProcessor accepts only timeouts for the rank it was initialized with.
// We expect a dedicated sentinel error for timeouts for other ranks (`ErrTimeoutForIncompatibleRank`).
func (s *TimeoutProcessorTestSuite) TestProcess_TimeoutNotForRank() {
    err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) {
        t.Rank++
    }))
    require.ErrorIs(s.T(), err, ErrTimeoutForIncompatibleRank)
    require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))

    s.sigAggregator.AssertNotCalled(s.T(), "Verify")
}

// TestProcess_TimeoutWithoutQC tests that TimeoutProcessor fails with models.InvalidTimeoutError if
// the timeout doesn't contain a QC.
func (s *TimeoutProcessorTestSuite) TestProcess_TimeoutWithoutQC() {
    err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) {
        t.LatestQuorumCertificate = nil
    }))
    require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
}

// TestProcess_TimeoutNewerHighestQC tests that TimeoutProcessor fails with models.InvalidTimeoutError if
// the timeout contains a QC with QC.Rank > timeout.Rank; a QC can only have a lower rank than the timeout.
func (s *TimeoutProcessorTestSuite) TestProcess_TimeoutNewerHighestQC() {
    s.Run("t.Rank == t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank", func() {
        err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) {
            t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank = t.Rank
        }))
        require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
    })
    s.Run("t.Rank < t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank", func() {
        err := s.processor.Process(s.TimeoutLastRankSuccessfulFixture(func(t *models.TimeoutState[*helper.TestVote]) {
            t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank = t.Rank + 1
        }))
        require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
    })
}

// TestProcess_PreviousRankTimeoutCertificateWrongRank tests that TimeoutProcessor fails with models.InvalidTimeoutError if
// the timeout contains a proof that the sender legitimately entered timeout.Rank but it has the wrong rank, meaning the
// sender used a TC from previous rounds.
func (s *TimeoutProcessorTestSuite) TestProcess_PreviousRankTimeoutCertificateWrongRank() {
    // if a TC is included, it must satisfy timeout.Rank == timeout.PriorRankTimeoutCertificate.(*helper.TestTimeoutCertificate).Rank+1
    err := s.processor.Process(s.TimeoutLastRankFailedFixture(func(t *models.TimeoutState[*helper.TestVote]) {
        t.PriorRankTimeoutCertificate.(*helper.TestTimeoutCertificate).Rank = t.Rank - 10
    }))
    require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
}

// TestProcess_LastRankHighestQCInvalidRank tests that TimeoutProcessor fails with models.InvalidTimeoutError if
// the timeout contains a proof that the sender legitimately entered timeout.Rank but the included HighestQC has an older
// rank than the QC included in the TC. For honest nodes this shouldn't happen.
func (s *TimeoutProcessorTestSuite) TestProcess_LastRankHighestQCInvalidRank() {
    err := s.processor.Process(s.TimeoutLastRankFailedFixture(func(t *models.TimeoutState[*helper.TestVote]) {
        t.PriorRankTimeoutCertificate.(*helper.TestTimeoutCertificate).LatestQuorumCert.(*helper.TestQuorumCertificate).Rank = t.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank + 1 // TC contains a newer QC than the Timeout State
    }))
    require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
}

// TestProcess_PreviousRankTimeoutCertificateRequiredButNotPresent tests that TimeoutProcessor fails with
// models.InvalidTimeoutError if the timeout must contain a proof that the sender legitimately entered timeout.Rank
// but doesn't have it.
func (s *TimeoutProcessorTestSuite) TestProcess_PreviousRankTimeoutCertificateRequiredButNotPresent() {
    // if the last rank was not successful (timeout.Rank != timeout.HighestQC.Rank+1) then this
    // timeout must contain a valid timeout.PriorRankTimeoutCertificate
    err := s.processor.Process(s.TimeoutLastRankFailedFixture(func(t *models.TimeoutState[*helper.TestVote]) {
        t.PriorRankTimeoutCertificate = nil
    }))
    require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
}

// TestProcess_IncludedQCInvalid tests that TimeoutProcessor correctly handles validation errors if
// the timeout is well-formed but the included QC is invalid.
func (s *TimeoutProcessorTestSuite) TestProcess_IncludedQCInvalid() {
    timeout := s.TimeoutLastRankSuccessfulFixture()

    s.Run("invalid-qc-sentinel", func() {
        *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(models.InvalidQuorumCertificateError{}).Once()

        err := s.processor.Process(timeout)
        require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.True(s.T(), models.IsInvalidQuorumCertificateError(err))
    })
    s.Run("invalid-qc-exception", func() {
        exception := errors.New("validate-qc-failed")
        *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(exception).Once()

        err := s.processor.Process(timeout)
        require.ErrorIs(s.T(), err, exception)
        require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
    })
    s.Run("invalid-qc-err-rank-for-unknown-epoch", func() {
        *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(models.ErrRankUnknown).Once()

        err := s.processor.Process(timeout)
        require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.NotErrorIs(s.T(), err, models.ErrRankUnknown)
    })
}

// TestProcess_IncludedTCInvalid tests that TimeoutProcessor correctly handles validation errors if
// the timeout is well-formed but the included TC is invalid.
func (s *TimeoutProcessorTestSuite) TestProcess_IncludedTCInvalid() {
    timeout := s.TimeoutLastRankFailedFixture()

    s.Run("invalid-tc-sentinel", func() {
        *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil)
        s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(models.InvalidTimeoutCertificateError{})

        err := s.processor.Process(timeout)
        require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.True(s.T(), models.IsInvalidTimeoutCertificateError(err))
    })
    s.Run("invalid-tc-exception", func() {
        exception := errors.New("validate-tc-failed")
        *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil)
        s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(exception).Once()

        err := s.processor.Process(timeout)
        require.ErrorIs(s.T(), err, exception)
        require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
    })
    s.Run("invalid-tc-err-rank-for-unknown-epoch", func() {
        *s.validator = *mocks.NewValidator[*helper.TestState, *helper.TestVote](s.T())
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil)
        s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(models.ErrRankUnknown).Once()

        err := s.processor.Process(timeout)
        require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.NotErrorIs(s.T(), err, models.ErrRankUnknown)
    })
}

// TestProcess_ValidTimeout tests that processing a valid timeout succeeds without error.
func (s *TimeoutProcessorTestSuite) TestProcess_ValidTimeout() {
    s.Run("happy-path", func() {
        timeout := s.TimeoutLastRankSuccessfulFixture()
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil).Once()
        err := s.processor.Process(timeout)
        require.NoError(s.T(), err)
        s.sigAggregator.AssertCalled(s.T(), "VerifyAndAdd", (*timeout.Vote).ID, (*timeout.Vote).Signature, timeout.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank)
    })
    s.Run("recovery-path", func() {
        timeout := s.TimeoutLastRankFailedFixture()
        s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil).Once()
        s.validator.On("ValidateTimeoutCertificate", timeout.PriorRankTimeoutCertificate).Return(nil).Once()
        err := s.processor.Process(timeout)
        require.NoError(s.T(), err)
        s.sigAggregator.AssertCalled(s.T(), "VerifyAndAdd", (*timeout.Vote).ID, (*timeout.Vote).Signature, timeout.LatestQuorumCertificate.(*helper.TestQuorumCertificate).Rank)
    })
}

// TestProcess_VerifyAndAddFailed tests different scenarios where TimeoutSignatureAggregator fails with an error.
// We check all sentinel errors and exceptions in this scenario.
func (s *TimeoutProcessorTestSuite) TestProcess_VerifyAndAddFailed() {
    timeout := s.TimeoutLastRankSuccessfulFixture()
    s.validator.On("ValidateQuorumCertificate", timeout.LatestQuorumCertificate).Return(nil)
    s.Run("invalid-signer", func() {
        *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T())
        s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).
            Return(uint64(0), models.NewInvalidSignerError(fmt.Errorf(""))).Once()
        err := s.processor.Process(timeout)
        require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.True(s.T(), models.IsInvalidSignerError(err))
    })
    s.Run("invalid-signature", func() {
        *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T())
        s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).
            Return(uint64(0), models.ErrInvalidSignature).Once()
        err := s.processor.Process(timeout)
        require.True(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.ErrorIs(s.T(), err, models.ErrInvalidSignature)
    })
    s.Run("duplicated-signer", func() {
        *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T())
        s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).
            Return(uint64(0), models.NewDuplicatedSignerErrorf("")).Once()
        err := s.processor.Process(timeout)
        require.True(s.T(), models.IsDuplicatedSignerError(err))
        // this shouldn't be wrapped as an invalid timeout
        require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
    })
    s.Run("verify-exception", func() {
        *s.sigAggregator = *mocks.NewTimeoutSignatureAggregator(s.T())
        exception := errors.New("verify-exception")
        s.sigAggregator.On("VerifyAndAdd", mock.Anything, mock.Anything, mock.Anything).
            Return(uint64(0), exception).Once()
        err := s.processor.Process(timeout)
        require.False(s.T(), models.IsInvalidTimeoutError[*helper.TestVote](err))
        require.ErrorIs(s.T(), err, exception)
    })
}

// TestProcess_CreatingTC is a test for the happy path of single-threaded signature aggregation and TC creation.
// Each replica commits a unique timeout state, which gets processed by TimeoutProcessor. After collecting
// enough weight we expect a TC to be created. All further operations should be no-ops; only one TC should be created.
func (s *TimeoutProcessorTestSuite) TestProcess_CreatingTC() {
    // consider the following situation:
    // the last successful rank was N; after this we weren't able to get a proposal with a QC for
    // len(participants) ranks, but in each rank a QC was created (though not distributed).
    // In rank N+len(participants) each replica contributes its unique highest QC.
    lastSuccessfulQC := helper.MakeQC(helper.WithQCRank(s.rank - uint64(len(s.participants))))
    previousRankTimeoutCert := helper.MakeTC(helper.WithTCRank(s.rank-1),
        helper.WithTCNewestQC(lastSuccessfulQC))

    var highQCRanks []uint64
    var timeouts []*models.TimeoutState[*helper.TestVote]
    signers := s.participants[1:]
    for i, signer := range signers {
        qc := helper.MakeQC(helper.WithQCRank(lastSuccessfulQC.GetRank() + uint64(i+1)))
        highQCRanks = append(highQCRanks, qc.GetRank())

        timeout := helper.TimeoutStateFixture(
            helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
            helper.WithTimeoutNewestQC[*helper.TestVote](qc),
            helper.WithTimeoutVote(&helper.TestVote{ID: signer.Identity(), Rank: s.rank}),
            helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](previousRankTimeoutCert),
        )
        timeouts = append(timeouts, timeout)
    }

    // change the tracker to require all but one signer to create a TC
    s.processor.tcTracker.minRequiredWeight = s.sigWeight * uint64(len(highQCRanks))

    expectedSigBytes := make([]byte, 74)
    expectedSig := &helper.TestAggregatedSignature{
        Signature: expectedSigBytes,
        Bitmask:   []byte{0b11111111, 0b00000111},
        PublicKey: make([]byte, 585),
    }
    s.validator.On("ValidateQuorumCertificate", mock.Anything).Return(nil)
    s.validator.On("ValidateTimeoutCertificate", mock.Anything).Return(nil)
    s.notifier.On("OnPartialTimeoutCertificateCreated", s.rank, mock.Anything, previousRankTimeoutCert).Return(nil).Once()
    s.notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Run(func(args mock.Arguments) {
        newestQC := timeouts[len(timeouts)-1].LatestQuorumCertificate
        tc := args.Get(0).(models.TimeoutCertificate)
        // ensure that the TC contains the correct fields
        expectedTC := &helper.TestTimeoutCertificate{
            Rank:                s.rank,
            LatestRanks:         highQCRanks,
            LatestQuorumCert:    newestQC,
            AggregatedSignature: expectedSig,
        }
        require.Equal(s.T(), expectedTC, tc)
    }).Return(nil).Once()
    s.voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestTimeoutCertificate{
        Filter:           nil,
        Rank:             s.rank,
        LatestRanks:      highQCRanks,
        LatestQuorumCert: timeouts[len(timeouts)-1].LatestQuorumCertificate,
        AggregatedSignature: &helper.TestAggregatedSignature{
            PublicKey: make([]byte, 585),
            Signature: make([]byte, 74),
            Bitmask:   []byte{0b11111111, 0b00000111},
        },
    }, nil)

    signersData := make([]consensus.TimeoutSignerInfo, 0)
    for i, signer := range signers {
        signersData = append(signersData, consensus.TimeoutSignerInfo{
            NewestQCRank: highQCRanks[i],
            Signer:       signer.Identity(),
        })
    }
    s.sigAggregator.On("Aggregate").Return(signersData, expectedSig, nil)

    for _, timeout := range timeouts {
        err := s.processor.Process(timeout)
        require.NoError(s.T(), err)
    }
    s.notifier.AssertExpectations(s.T())
    s.sigAggregator.AssertExpectations(s.T())

    // add an extra timeout and make sure we don't create another TC;
    // this should be a no-op
    timeout := helper.TimeoutStateFixture(
        helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
        helper.WithTimeoutNewestQC[*helper.TestVote](helper.MakeQC(helper.WithQCRank(lastSuccessfulQC.GetRank()))),
        helper.WithTimeoutVote(&helper.TestVote{
            ID:   s.participants[0].Identity(),
            Rank: s.rank,
        }),
        helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](nil),
    )
    err := s.processor.Process(timeout)
    require.NoError(s.T(), err)

    s.notifier.AssertExpectations(s.T())
    s.validator.AssertExpectations(s.T())
}
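
The "only one TC" guarantee this test asserts hinges on a weight tracker that fires exactly once when the accumulated weight first crosses `minRequiredWeight`. The tcTracker internals are not shown in this diff; the following is a sketch of that fire-once threshold idea under those assumptions:

package main

import (
    "fmt"
    "sync"
)

// onceTracker sketches a threshold tracker in the spirit of tcTracker: it
// reports true exactly once, when accumulated weight first reaches the
// minimum required weight, no matter how many goroutines contribute.
type onceTracker struct {
    mu                sync.Mutex
    total             uint64
    minRequiredWeight uint64
    fired             bool
}

func (t *onceTracker) add(weight uint64) bool {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.total += weight
    if !t.fired && t.total >= t.minRequiredWeight {
        t.fired = true // subsequent contributions are no-ops
        return true
    }
    return false
}

func main() {
    tr := &onceTracker{minRequiredWeight: 10000}
    fired := 0
    for i := 0; i < 11; i++ {
        if tr.add(1000) { // 11 signers x 1000 weight
            fired++
        }
    }
    fmt.Println(fired) // 1: the TC is built exactly once
}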
|
||||
|
||||
// TestProcess_ConcurrentCreatingTC tests a scenario where multiple goroutines process timeout at same time,
|
||||
// we expect only one TC created in this scenario.
|
||||
func (s *TimeoutProcessorTestSuite) TestProcess_ConcurrentCreatingTC() {
    s.validator.On("ValidateQuorumCertificate", mock.Anything).Return(nil)
    s.notifier.On("OnPartialTimeoutCertificateCreated", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
    s.notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Return(nil).Once()

    signersData := make([]consensus.TimeoutSignerInfo, 0, len(s.participants))
    for _, signer := range s.participants {
        signersData = append(signersData, consensus.TimeoutSignerInfo{
            NewestQCRank: 0,
            Signer:       signer.Identity(),
        })
    }
    // don't care about actual data
    s.sigAggregator.On("Aggregate").Return(signersData, &helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, nil)
    var startupWg, shutdownWg sync.WaitGroup

    newestQC := helper.MakeQC(helper.WithQCRank(s.rank - 1))
    s.voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestTimeoutCertificate{
        Filter:           nil,
        Rank:             s.rank,
        LatestRanks:      []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
        LatestQuorumCert: newestQC,
        AggregatedSignature: &helper.TestAggregatedSignature{
            PublicKey: make([]byte, 585),
            Signature: make([]byte, 74),
            Bitmask:   []byte{0b11111111, 0b00000111},
        },
    }, nil)

    startupWg.Add(1)
    // prepare goroutines, so they are ready to submit a timeout at roughly the same time
    for i, signer := range s.participants {
        shutdownWg.Add(1)
        timeout := helper.TimeoutStateFixture(
            helper.WithTimeoutStateRank[*helper.TestVote](s.rank),
            helper.WithTimeoutNewestQC[*helper.TestVote](newestQC),
            helper.WithTimeoutVote(&helper.TestVote{
                ID:   signer.Identity(),
                Rank: s.rank,
            }),
            helper.WithTimeoutPreviousRankTimeoutCertificate[*helper.TestVote](nil),
        )
        go func(i int, timeout *models.TimeoutState[*helper.TestVote]) {
            defer shutdownWg.Done()
            startupWg.Wait()
            err := s.processor.Process(timeout)
            require.NoError(s.T(), err)
        }(i, timeout)
    }

    startupWg.Done()

    // wait for all routines to finish
    shutdownWg.Wait()
}
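
The startupWg/shutdownWg pair above is a small barrier idiom: every worker blocks on startupWg.Wait() until the main goroutine calls startupWg.Done(), so all workers submit their timeouts at roughly the same instant, maximising contention on the processor. A minimal, self-contained sketch of the same idiom (illustrative names, not part of this commit):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var startupWg, shutdownWg sync.WaitGroup
    startupWg.Add(1) // gate: workers block until it is released

    for i := 0; i < 4; i++ {
        shutdownWg.Add(1)
        go func(id int) {
            defer shutdownWg.Done()
            startupWg.Wait() // all workers released at (roughly) the same time
            fmt.Println("worker", id, "running")
        }(i)
    }

    startupWg.Done()  // release the gate once every worker is parked
    shutdownWg.Wait() // wait for all workers to finish
}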

// TestTimeoutProcessor_BuildVerifyTC tests the complete path from creating timeouts to collecting timeouts and then
// building & verifying the TC.
// This test emulates the most complex scenario, in which the TC consists of TimeoutStates that are structurally different.
// Let's consider a case where at some rank N the consensus committee generated both a QC and a TC, resulting in nodes entering rank N+1 in different ways.
// When constructing the TC for rank N+1, some replicas will contribute with TO{Rank:N+1, NewestQC.Rank: N, PreviousRankTimeoutCertificate: nil}
// while others contribute with TO{Rank:N+1, NewestQC.Rank: N-1, PreviousRankTimeoutCertificate: TC{Rank: N, NewestQC.Rank: N-1}}.
// This results in a multi-message BLS signature with messages picked from the set M={N-1,N}.
// We have to be able to construct a valid TC for rank N+1 and successfully validate it.
// We start by building a valid QC for rank N-1 that will be included in every TimeoutState at rank N.
// Right after, we create a valid QC for rank N. We need valid QCs since TimeoutProcessor performs complete validation of each TimeoutState.
// Then we create a valid, cryptographically signed timeout for each signer. The created timeouts are fed to the TimeoutProcessor,
// which eventually creates a TC after processing enough objects. Afterwards we verify that the TC was correctly constructed
// and that it doesn't violate protocol rules. At this point we have a QC for rank N-1, and both a QC and a TC for rank N.
// After constructing these valid objects we repeat the TC creation process and create a TC for rank N+1, where replicas contribute
// structurally different TimeoutStates to make sure that the TC is correctly built and can be successfully validated.
func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) {
    // signers hold objects that are created with a private key and can sign votes and proposals
    signers := make(map[models.Identity]*verification.Signer[*helper.TestState, *helper.TestVote, *helper.TestPeer])
    // prepare proving signers, each signer has its own private/public key pair
    // identities must be in canonical order
    provingSigners := helper.WithWeightedIdentityList(11)
    leader := provingSigners[0]
    rank := uint64(rand.Uint32() + 100)

    state := helper.MakeState(helper.WithStateRank[*helper.TestState](rank-1),
        helper.WithStateProposer[*helper.TestState](leader.Identity()))
    votingProviders := []*mocks.VotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer]{}
    for _, s := range provingSigners {
        v := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)
        votingProviders = append(votingProviders, v)
        vote := &helper.TestVote{
            ID:        s.Identity(),
            Rank:      rank - 1,
            Signature: make([]byte, 74),
            Timestamp: uint64(time.Now().UnixMilli()),
            StateID:   state.Identifier,
        }
        v.On("SignVote", mock.Anything, mock.Anything).Return(&vote, nil).Once()
        signers[s.Identity()] = verification.NewSigner(v)
    }

    // utility function which generates a valid timeout for every signer
    createTimeouts := func(participants []models.WeightedIdentity, rank uint64, newestQC models.QuorumCertificate, previousRankTimeoutCert models.TimeoutCertificate) []*models.TimeoutState[*helper.TestVote] {
        timeouts := make([]*models.TimeoutState[*helper.TestVote], 0, len(participants))
        for _, signer := range participants {
            timeout, err := signers[signer.Identity()].CreateTimeout(rank, newestQC, previousRankTimeoutCert)
            require.NoError(t, err)
            timeouts = append(timeouts, timeout)
        }
        return timeouts
    }

    provingSignersSkeleton := provingSigners

    committee := mocks.NewDynamicCommittee(t)
    committee.On("IdentitiesByRank", mock.Anything).Return(provingSignersSkeleton, nil)
    committee.On("IdentitiesByState", mock.Anything).Return(provingSigners, nil)
    committee.On("QuorumThresholdForRank", mock.Anything).Return(uint64(8000), nil)
    committee.On("TimeoutThresholdForRank", mock.Anything).Return(uint64(8000), nil)

    // create the first QC for rank N-1; this will be our olderQC
    olderQC := createRealQC(t, committee, provingSignersSkeleton, signers, state)
    // now create a second QC for rank N; this will be our newest QC
    nextState := helper.MakeState(
        helper.WithStateRank[*helper.TestState](rank),
        helper.WithStateProposer[*helper.TestState](leader.Identity()),
        helper.WithStateQC[*helper.TestState](olderQC))

    for i, vp := range votingProviders {
        vote := &helper.TestVote{
            ID:        provingSigners[i].Identity(),
            Rank:      rank,
            Signature: make([]byte, 74),
            Timestamp: uint64(time.Now().UnixMilli()),
            StateID:   nextState.Identifier,
        }
        vp.On("SignVote", mock.Anything, mock.Anything).Return(&vote, nil).Once()
        tvote := &helper.TestVote{
            ID:        provingSigners[i].Identity(),
            Rank:      rank,
            Signature: make([]byte, 74),
            Timestamp: uint64(time.Now().UnixMilli()),
        }
        vp.On("SignTimeoutVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&tvote, nil)
    }
    newestQC := createRealQC(t, committee, provingSignersSkeleton, signers, nextState)

    // At this point we have created two QCs, for ranks N-1 and N.
    // The next step is to create a TC for rank N.

    // create a verifier that will do the crypto checks of the created TC
    verifier := &mocks.Verifier[*helper.TestVote]{}
    verifier.On("VerifyQuorumCertificate", mock.Anything).Return(nil)
    verifier.On("VerifyTimeoutCertificate", mock.Anything).Return(nil)

    // create a validator which will do the compliance and crypto checks of the created TC
    validator := validator.NewValidator[*helper.TestState, *helper.TestVote](committee, verifier)

    var previousRankTimeoutCert models.TimeoutCertificate
    onTCCreated := func(args mock.Arguments) {
        tc := args.Get(0).(models.TimeoutCertificate)
        // check that the resulting TC is valid
        err := validator.ValidateTimeoutCertificate(tc)
        require.NoError(t, err)
        previousRankTimeoutCert = tc
    }

    sigagg := mocks.NewSignatureAggregator(t)
    sigagg.On("VerifySignatureRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true)
    sigagg.On("Aggregate", mock.Anything, mock.Anything).Return(&helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, nil)

    aggregator, err := NewTimeoutSignatureAggregator(sigagg, rank, provingSignersSkeleton, []byte{})
    require.NoError(t, err)

    notifier := mocks.NewTimeoutCollectorConsumer[*helper.TestVote](t)
    notifier.On("OnPartialTimeoutCertificateCreated", rank, olderQC, nil).Return().Once()
    notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Run(onTCCreated).Return().Once()
    voting := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)
    voting.On("FinalizeTimeout", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestTimeoutCertificate{
        Filter:              nil,
        Rank:                rank,
        LatestRanks:         []uint64{rank - 1, rank - 1, rank - 1, rank - 1, rank - 1, rank - 1, rank - 1, rank - 1},
        LatestQuorumCert:    olderQC,
        AggregatedSignature: &helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}},
    }, nil)
    processor, err := NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](helper.Logger(), committee, validator, aggregator, notifier, voting)
    require.NoError(t, err)

    // last rank was successful, so there is no previousRankTimeoutCert in this case
    timeouts := createTimeouts(provingSignersSkeleton, rank, olderQC, nil)
    for _, timeout := range timeouts {
        err := processor.Process(timeout)
        require.NoError(t, err)
    }

    notifier.AssertExpectations(t)

    // at this point we have created QCs for ranks N-1 and N, and additionally a TC for
    // rank N; we can create a TC for rank N+1 with timeout states containing both the
    // QC and the TC for rank N

    aggregator, err = NewTimeoutSignatureAggregator(sigagg, rank+1, provingSignersSkeleton, []byte{})
    require.NoError(t, err)

    notifier = mocks.NewTimeoutCollectorConsumer[*helper.TestVote](t)
    notifier.On("OnPartialTimeoutCertificateCreated", rank+1, newestQC, mock.Anything).Return()
    notifier.On("OnTimeoutCertificateConstructedFromTimeouts", mock.Anything).Run(onTCCreated).Return().Once()
    processor, err = NewTimeoutProcessor[*helper.TestState, *helper.TestVote, *helper.TestPeer](helper.Logger(), committee, validator, aggregator, notifier, voting)
    require.NoError(t, err)

    // part of the committee will use the QC, another part the TC; this results in an
    // aggregated signature consisting of two types of messages, with ranks N-1 and N
    // representing the newest QC known to the replicas.
    timeoutsWithQC := createTimeouts(provingSignersSkeleton[:len(provingSignersSkeleton)/2], rank+1, newestQC, nil)
    timeoutsWithTC := createTimeouts(provingSignersSkeleton[len(provingSignersSkeleton)/2:], rank+1, olderQC, previousRankTimeoutCert)
    timeouts = append(timeoutsWithQC, timeoutsWithTC...)
    for _, timeout := range timeouts {
        err := processor.Process(timeout)
        require.NoError(t, err)
    }

    notifier.AssertExpectations(t)
}
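
To make the multi-message structure concrete: after the final round above, the aggregate covers two message variants, because half of the committee signed over newest-QC rank N and the other half over rank N-1. A tiny, self-contained illustration (the struct is a simplified stand-in for consensus.TimeoutSignerInfo; all values are illustrative):

package main

import "fmt"

// timeoutSignerInfo is a simplified stand-in for consensus.TimeoutSignerInfo:
// which signer contributed, and the rank of the newest QC they knew.
type timeoutSignerInfo struct {
    Signer       string
    NewestQCRank uint64
}

func main() {
    const n = uint64(100) // hypothetical rank N
    // Half the committee entered rank N+1 via the QC for rank N; the other
    // half entered via the TC for rank N, so their newest QC is for rank N-1.
    contributions := []timeoutSignerInfo{
        {Signer: "replica-0", NewestQCRank: n},
        {Signer: "replica-1", NewestQCRank: n - 1},
    }
    // The aggregated BLS signature therefore spans messages from the set {N-1, N}.
    for _, c := range contributions {
        fmt.Printf("%s signed over newest QC rank %d\n", c.Signer, c.NewestQCRank)
    }
}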

// createRealQC is a helper function which generates a properly signed QC with real signatures for the given state.
func createRealQC(
    t *testing.T,
    committee consensus.DynamicCommittee,
    signers []models.WeightedIdentity,
    signerObjects map[models.Identity]*verification.Signer[*helper.TestState, *helper.TestVote, *helper.TestPeer],
    state *models.State[*helper.TestState],
) models.QuorumCertificate {
    leader := signers[0]
    leaderVote, err := signerObjects[leader.Identity()].CreateVote(state)
    require.NoError(t, err)
    proposal := helper.MakeSignedProposal(helper.WithProposal[*helper.TestState, *helper.TestVote](helper.MakeProposal(helper.WithState(state))), helper.WithVote[*helper.TestState](leaderVote))

    var createdQC *models.QuorumCertificate
    onQCCreated := func(qc models.QuorumCertificate) {
        createdQC = &qc
    }

    voteProcessorFactory := votecollector.NewVoteProcessorFactory[*helper.TestState, *helper.TestVote, *helper.TestPeer](committee, onQCCreated)
    sigagg := mocks.NewSignatureAggregator(t)
    sigagg.On("VerifySignatureRaw", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(true)
    sigagg.On("Aggregate", mock.Anything, mock.Anything).Return(&helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}}, nil)

    votingProvider := mocks.NewVotingProvider[*helper.TestState, *helper.TestVote, *helper.TestPeer](t)
    votingProvider.On("FinalizeQuorumCertificate", mock.Anything, mock.Anything, mock.Anything).Return(&helper.TestQuorumCertificate{
        Filter:              nil,
        Rank:                state.Rank,
        FrameNumber:         state.Rank,
        Selector:            state.Identifier,
        Timestamp:           time.Now().UnixMilli(),
        AggregatedSignature: &helper.TestAggregatedSignature{PublicKey: make([]byte, 585), Signature: make([]byte, 74), Bitmask: []byte{0b11111111, 0b00000111}},
    }, nil)
    voteProcessor, err := voteProcessorFactory.Create(helper.Logger(), proposal, []byte{}, sigagg, votingProvider)
    require.NoError(t, err)

    for _, signer := range signers[1:] {
        vote, err := signerObjects[signer.Identity()].CreateVote(state)
        require.NoError(t, err)
        err = voteProcessor.Process(vote)
        require.NoError(t, err)
    }

    require.NotNil(t, createdQC, "vote processor must create a valid QC at this point")
    return *createdQC
}

@ -128,48 +128,48 @@ func (t *NewestStateTracker[StateT]) NewestState() *models.State[StateT] {
    return (*models.State[StateT])(t.newestState.Load())
}

// NewestPartialTcTracker tracks the newest partial TC (by rank) in a
// NewestPartialTimeoutCertificateTracker tracks the newest partial TC (by rank) in a
// concurrency safe way.
type NewestPartialTcTracker struct {
    newestPartialTc *atomic.UnsafePointer
type NewestPartialTimeoutCertificateTracker struct {
    newestPartialTimeoutCertificate *atomic.UnsafePointer
}

func NewNewestPartialTcTracker() *NewestPartialTcTracker {
    tracker := &NewestPartialTcTracker{
        newestPartialTc: atomic.NewUnsafePointer(unsafe.Pointer(nil)),
func NewNewestPartialTimeoutCertificateTracker() *NewestPartialTimeoutCertificateTracker {
    tracker := &NewestPartialTimeoutCertificateTracker{
        newestPartialTimeoutCertificate: atomic.NewUnsafePointer(unsafe.Pointer(nil)),
    }
    return tracker
}

// Track updates local state of newestPartialTc if the provided instance is
// Track updates local state of newestPartialTimeoutCertificate if the provided instance is
// newer (by rank). Concurrency safe.
func (t *NewestPartialTcTracker) Track(
    partialTc *consensus.PartialTimeoutCertificateCreated,
func (t *NewestPartialTimeoutCertificateTracker) Track(
    partialTimeoutCertificate *consensus.PartialTimeoutCertificateCreated,
) bool {
    // To record the newest value that we have ever seen, we need to use a loop
    // with a CAS atomic operation to make sure that we always write the latest
    // value in case of shared access to the updated value.
    for {
        // take a snapshot
        newestPartialTc := t.NewestPartialTc()
        newestPartialTimeoutCertificate := t.NewestPartialTimeoutCertificate()
        // verify that our partial TC is from a newer rank
        if newestPartialTc != nil && newestPartialTc.Rank >= partialTc.Rank {
        if newestPartialTimeoutCertificate != nil && newestPartialTimeoutCertificate.Rank >= partialTimeoutCertificate.Rank {
            return false
        }
        // attempt to install the new value; repeat in case of a shared update.
        if t.newestPartialTc.CompareAndSwap(
            unsafe.Pointer(newestPartialTc),
            unsafe.Pointer(partialTc),
        if t.newestPartialTimeoutCertificate.CompareAndSwap(
            unsafe.Pointer(newestPartialTimeoutCertificate),
            unsafe.Pointer(partialTimeoutCertificate),
        ) {
            return true
        }
    }
}

// NewestPartialTc returns the newest partial TC (by rank) tracked.
// NewestPartialTimeoutCertificate returns the newest partial TC (by rank) tracked.
// Concurrency safe.
func (
    t *NewestPartialTcTracker,
) NewestPartialTc() *consensus.PartialTimeoutCertificateCreated {
    return (*consensus.PartialTimeoutCertificateCreated)(t.newestPartialTc.Load())
    t *NewestPartialTimeoutCertificateTracker,
) NewestPartialTimeoutCertificate() *consensus.PartialTimeoutCertificateCreated {
    return (*consensus.PartialTimeoutCertificateCreated)(t.newestPartialTimeoutCertificate.Load())
}
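
The Track loop above is the standard lock-free "keep the newest" pattern: snapshot, compare, compare-and-swap, retry on contention. A minimal generic sketch of the same pattern using the standard library's atomic.Pointer instead of go.uber.org/atomic and unsafe.Pointer (illustrative only; the real tracker stores *consensus.PartialTimeoutCertificateCreated):

package main

import (
    "fmt"
    "sync/atomic"
)

type rankedValue struct{ Rank uint64 }

type newestTracker struct {
    newest atomic.Pointer[rankedValue]
}

// Track installs v if it is newer (by rank) than the currently stored value.
// It returns true if v was installed. Safe for concurrent use.
func (t *newestTracker) Track(v *rankedValue) bool {
    for {
        cur := t.newest.Load() // take a snapshot
        if cur != nil && cur.Rank >= v.Rank {
            return false // the stored value is at least as new
        }
        if t.newest.CompareAndSwap(cur, v) {
            return true // installed; on failure another writer won, so retry
        }
    }
}

func main() {
    var t newestTracker
    fmt.Println(t.Track(&rankedValue{Rank: 5})) // true
    fmt.Println(t.Track(&rankedValue{Rank: 3})) // false: 5 is already newer
}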

@ -242,12 +242,7 @@ func (v *Validator[StateT, VoteT]) ValidateQuorumCertificate(
    err = v.verifier.VerifyQuorumCertificate(qc)
    if err != nil {
        // Considerations about other errors that `VerifyQuorumCertificate` could return:
        // * models.InvalidSignerError: for the time being, we assume that _every_
        //   HotStuff participant is also a member of the random beacon committee.
        //   Consequently, `InvalidSignerError` should not occur atm.
        //   TODO: if the random beacon committee is a strict subset of the
        //   HotStuff committee, we expect `models.InvalidSignerError` here
        //   during normal operations.
        // * models.InvalidSignerError
        // * models.InsufficientSignaturesError: we previously checked the total
        //   weight of all signers meets the supermajority threshold, which is a
        //   _positive_ number. Hence, there must be at least one signer. Hence,

@ -470,12 +465,7 @@ func (v *Validator[StateT, VoteT]) ValidateVote(vote *VoteT) (
    err = v.verifier.VerifyVote(vote)
    if err != nil {
        // Theoretically, `VerifyVote` could also return a
        // `models.InvalidSignerError`. However, for the time being, we assume that
        // _every_ HotStuff participant is also a member of the random beacon
        // committee. Consequently, `InvalidSignerError` should not occur atm.
        // TODO: if the random beacon committee is a strict subset of the HotStuff
        // committee, we expect `models.InvalidSignerError` here during normal
        // operations.
        // `models.InvalidSignerError`.
        if models.IsInvalidFormatError(err) ||
            errors.Is(err, models.ErrInvalidSignature) {
            return nil, newInvalidVoteError(vote, err)

@ -15,7 +15,7 @@ import (
// message without having the full state contents.
func MakeVoteMessage(rank uint64, stateID models.Identity) []byte {
    msg := []byte{}
    binary.BigEndian.PutUint64(msg, rank)
    msg = binary.BigEndian.AppendUint64(msg, rank)
    msg = append(msg, stateID[:]...)
    return msg
}
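
The message layout produced by MakeVoteMessage is simply an 8-byte big-endian rank followed by the raw state-identifier bytes. A hypothetical decoder (not part of this commit) would invert it like so:

package main

import (
    "encoding/binary"
    "fmt"
)

// decodeVoteMessage splits a vote message back into its rank and state-ID
// bytes. Sketch only: it assumes msg was built like MakeVoteMessage above.
func decodeVoteMessage(msg []byte) (rank uint64, stateID []byte, err error) {
    if len(msg) < 8 {
        return 0, nil, fmt.Errorf("vote message too short: %d bytes", len(msg))
    }
    return binary.BigEndian.Uint64(msg[:8]), msg[8:], nil
}

func main() {
    msg := binary.BigEndian.AppendUint64(nil, 42)
    msg = append(msg, 0xAA, 0xBB) // stand-in state-ID bytes
    rank, stateID, _ := decodeVoteMessage(msg)
    fmt.Println(rank, stateID) // 42 [170 187]
}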

@ -24,7 +24,7 @@ type VoteAggregator[StateT models.Unique, VoteT models.Unique] interface {
    // `VoteAggregator` and processed _asynchronously_ by the VoteAggregator's
    // internal worker routines.
    // CAUTION: we expect that the input state's validity has been confirmed prior
    // to calling AddState, including the proposer's signature. Otherwise,
    // VoteAggregator might crash or exhibit undefined behaviour.
    AddState(state *models.SignedProposal[StateT, VoteT])

@ -123,12 +123,22 @@ type VerifyingVoteProcessor[
// VoteProcessorFactory is a factory that can be used to create verifying vote
// processors for a specific proposal. Depending on the factory implementation it
// will return processors for consensus or collection clusters.
type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique] interface {
type VoteProcessorFactory[
    StateT models.Unique,
    VoteT models.Unique,
    PeerIDT models.Unique,
] interface {
    // Create instantiates a VerifyingVoteProcessor for processing votes for a
    // specific proposal. Callers can be sure that the proposal's vote was
    // successfully verified and processed. Expected error returns during normal operations:
    // * models.InvalidProposalError - proposal has invalid proposer vote
    Create(tracer TraceLogger, proposal *models.SignedProposal[StateT, VoteT]) (
    Create(
        tracer TraceLogger,
        proposal *models.SignedProposal[StateT, VoteT],
        dsTag []byte,
        aggregator SignatureAggregator,
        votingProvider VotingProvider[StateT, VoteT, PeerIDT],
    ) (
        VerifyingVoteProcessor[StateT, VoteT],
        error,
    )

@ -224,7 +224,7 @@ func (va *VoteAggregator[StateT, VoteT]) processQueuedVote(vote *VoteT) error {
// processQueuedState performs the actual processing of queued state proposals; this
// method is called from multiple concurrent goroutines.
// CAUTION: we expect that the input state's validity has been confirmed prior
// to calling AddState, including the proposer's signature. Otherwise,
// VoteAggregator might crash or exhibit undefined behaviour. No errors are
// expected during normal operation.
func (va *VoteAggregator[StateT, VoteT]) processQueuedState(

@ -302,7 +302,7 @@ func (va *VoteAggregator[StateT, VoteT]) AddVote(vote *VoteT) {
// `VoteAggregator` and processed _asynchronously_ by the VoteAggregator's
// internal worker routines.
// CAUTION: we expect that the input state's validity has been confirmed prior
// to calling AddState, including the proposer's signature. Otherwise,
// VoteAggregator might crash or exhibit undefined behaviour.
func (va *VoteAggregator[StateT, VoteT]) AddState(
    state *models.SignedProposal[StateT, VoteT],

@ -32,17 +32,11 @@ type VoteCollectors[StateT models.Unique, VoteT models.Unique] struct {
var _ consensus.VoteCollectors[*nilUnique, *nilUnique] = (*VoteCollectors[*nilUnique, *nilUnique])(nil)

func NewVoteCollectors[StateT models.Unique, VoteT models.Unique](
    ctx context.Context,
    tracer consensus.TraceLogger,
    lowestRetainedRank uint64,
    workerPool consensus.Workerpool,
    factoryMethod NewCollectorFactoryMethod[StateT, VoteT],
) *VoteCollectors[StateT, VoteT] {
    go func() {
        <-ctx.Done()          // wait for parent context to signal shutdown
        workerPool.StopWait() // wait till all workers exit
    }()

    return &VoteCollectors[StateT, VoteT]{
        tracer:             tracer,
        lowestRetainedRank: lowestRetainedRank,

@ -52,6 +46,14 @@ func NewVoteCollectors[StateT models.Unique, VoteT models.Unique](
    }
}

func (v *VoteCollectors[StateT, VoteT]) Start(ctx context.Context) error {
    go func() {
        <-ctx.Done()            // wait for parent context to signal shutdown
        v.workerPool.StopWait() // wait till all workers exit
    }()
    return nil
}

// GetOrCreateCollector retrieves the consensus.VoteCollector for the specified
// rank or creates one if none exists.
// - (collector, true, nil) if no collector can be found by the rank, and a

@ -44,21 +44,21 @@ func (c *NoopProcessor[VoteT]) Status() consensus.VoteCollectorStatus {
// state but for a different stateID
func EnsureVoteForState[StateT models.Unique, VoteT models.Unique](
    vote *VoteT,
    state *StateT,
    state *models.State[StateT],
) error {
    if (*vote).GetRank() != (*state).GetRank() {
    if (*vote).GetRank() != state.Rank {
        return fmt.Errorf(
            "vote %v has rank %d while state's rank is %d: %w ",
            (*vote).Identity(),
            (*vote).GetRank(),
            (*state).GetRank(),
            state.Rank,
            VoteForIncompatibleRankError,
        )
    }
    if (*vote).Source() != (*state).Identity() {
    if (*vote).Source() != state.Identifier {
        return fmt.Errorf(
            "expecting only votes for state %v, but vote %v is for state %v: %w ",
            (*state).Identity(),
            state.Identifier,
            (*vote).Identity(),
            (*vote).Source(),
            VoteForIncompatibleStateError,

@ -17,13 +17,17 @@ import (
// `consensus.VoteProcessorFactory` by itself. The VoteProcessorFactory adds the
// missing logic to verify the proposer's vote, by wrapping the baseFactory
// (decorator pattern).
type baseFactory[StateT models.Unique, VoteT models.Unique] func(
type baseFactory[
    StateT models.Unique,
    VoteT models.Unique,
    PeerIDT models.Unique,
] func(
    tracer consensus.TraceLogger,
    state *models.State[StateT],
) (
    consensus.VerifyingVoteProcessor[StateT, VoteT],
    error,
)
    dsTag []byte,
    aggregator consensus.SignatureAggregator,
    votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT],
) (consensus.VerifyingVoteProcessor[StateT, VoteT], error)
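
The "(decorator pattern)" note refers to the usual wrap-a-function shape: the exported factory performs an extra verification step and then delegates to the wrapped base factory. A minimal, self-contained sketch of that shape (illustrative names only, not the actual consensus types):

package main

import "fmt"

// factory is the core constructor; withValidation wraps it with an extra
// check first, mirroring how VoteProcessorFactory wraps baseFactory above.
type factory func(input string) (string, error)

func withValidation(base factory) factory {
    return func(input string) (string, error) {
        if input == "" {
            return "", fmt.Errorf("invalid input") // check added by the wrapper
        }
        return base(input) // delegate to the wrapped factory
    }
}

func main() {
    base := func(in string) (string, error) { return "processor(" + in + ")", nil }
    create := withValidation(base)
    out, err := create("proposal")
    fmt.Println(out, err)
}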

// VoteProcessorFactory implements `consensus.VoteProcessorFactory`. Its main
// purpose is to construct instances of VerifyingVoteProcessors for a given

@ -34,21 +38,34 @@ type baseFactory[StateT models.Unique, VoteT models.Unique] func(
// Thereby, VoteProcessorFactory guarantees that only proposals with a valid
// proposer vote are accepted (as per the API specification). Otherwise, a
// `models.InvalidProposalError` is returned.
type VoteProcessorFactory[StateT models.Unique, VoteT models.Unique] struct {
    baseFactory baseFactory[StateT, VoteT]
type VoteProcessorFactory[
    StateT models.Unique,
    VoteT models.Unique,
    PeerIDT models.Unique,
] struct {
    baseFactory baseFactory[StateT, VoteT, PeerIDT]
}

var _ consensus.VoteProcessorFactory[*nilUnique, *nilUnique] = (*VoteProcessorFactory[*nilUnique, *nilUnique])(nil)
var _ consensus.VoteProcessorFactory[*nilUnique, *nilUnique, *nilUnique] = (*VoteProcessorFactory[*nilUnique, *nilUnique, *nilUnique])(nil)

// Create instantiates a VerifyingVoteProcessor for the given state proposal.
// VerifyingVoteProcessors are only created for proposals with valid proposer
// votes. Expected error returns during normal operations:
// * models.InvalidProposalError - proposal has invalid proposer vote
func (f *VoteProcessorFactory[StateT, VoteT]) Create(
func (f *VoteProcessorFactory[StateT, VoteT, PeerIDT]) Create(
    tracer consensus.TraceLogger,
    proposal *models.SignedProposal[StateT, VoteT],
    dsTag []byte,
    aggregator consensus.SignatureAggregator,
    votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT],
) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) {
    processor, err := f.baseFactory(tracer, proposal.State)
    processor, err := f.baseFactory(
        tracer,
        proposal.State,
        dsTag,
        aggregator,
        votingProvider,
    )
    if err != nil {
        return nil, fmt.Errorf(
            "instantiating vote processor for state %v failed: %w",

@ -88,12 +105,12 @@ func NewVoteProcessorFactory[
](
    committee consensus.DynamicCommittee,
    onQCCreated consensus.OnQuorumCertificateCreated,
) *VoteProcessorFactory[StateT, VoteT] {
) *VoteProcessorFactory[StateT, VoteT, PeerIDT] {
    base := &provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]{
        committee:   committee,
        onQCCreated: onQCCreated,
    }
    return &VoteProcessorFactory[StateT, VoteT]{
    return &VoteProcessorFactory[StateT, VoteT, PeerIDT]{
        baseFactory: base.Create,
    }
}

@ -113,12 +130,15 @@ func NewBootstrapVoteProcessor[
    committee consensus.DynamicCommittee,
    state *models.State[StateT],
    onQCCreated consensus.OnQuorumCertificateCreated,
    dsTag []byte,
    aggregator consensus.SignatureAggregator,
    votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT],
) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) {
    factory := &provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]{
        committee:   committee,
        onQCCreated: onQCCreated,
    }
    return factory.Create(tracer, state)
    return factory.Create(tracer, state, dsTag, aggregator, votingProvider)
}

// Type used to satisfy generic arguments in compile-time type assertion check

@ -5,9 +5,10 @@ import (
    "fmt"
    "sync"

    "github.com/rs/zerolog"
    "go.uber.org/atomic"

    "source.quilibrium.com/quilibrium/monorepo/consensus"
    "source.quilibrium.com/quilibrium/monorepo/consensus/models"
    "source.quilibrium.com/quilibrium/monorepo/consensus/voteaggregator"
)

@ -29,7 +30,7 @@ type VerifyingVoteProcessorFactory[
// states of vote collector
type VoteCollector[StateT models.Unique, VoteT models.Unique] struct {
    sync.Mutex
    log                      zerolog.Logger
    tracer                   consensus.TraceLogger
    workers                  consensus.Workers
    notifier                 consensus.VoteAggregationConsumer[StateT, VoteT]
    createVerifyingProcessor VerifyingVoteProcessorFactory[StateT, VoteT]

@ -81,16 +82,16 @@ func NewStateMachine[StateT models.Unique, VoteT models.Unique](
    verifyingVoteProcessorFactory VerifyingVoteProcessorFactory[StateT, VoteT],
) *VoteCollector[StateT, VoteT] {
    sm := &VoteCollector[StateT, VoteT]{
        tracer: tracer
        tracer:                   tracer,
        workers:                  workers,
        notifier:                 notifier,
        createVerifyingProcessor: verifyingVoteProcessorFactory,
        votesCache:               *NewVotesCache[StateT, VoteT](rank),
        votesCache:               *NewVotesCache[VoteT](rank),
    }

    // without a state, we don't process votes (only cache them)
    sm.votesProcessor.Store(&atomicValueWrapper{
        processor: NewNoopCollector(consensus.VoteCollectorStatusCaching),
    sm.votesProcessor.Store(&atomicValueWrapper[VoteT]{
        processor: NewNoopCollector[VoteT](consensus.VoteCollectorStatusCaching),
    })
    return sm
}

@ -105,7 +106,7 @@ func (m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error {
    if errors.Is(err, RepeatedVoteErr) {
        return nil
    }
    doubleVoteErr, isDoubleVoteErr := models.AsDoubleVoteError(err)
    doubleVoteErr, isDoubleVoteErr := models.AsDoubleVoteError[VoteT](err)
    if isDoubleVoteErr {
        m.notifier.OnDoubleVotingDetected(
            doubleVoteErr.FirstVote,

@ -115,8 +116,8 @@ func (m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error {
    }
    return fmt.Errorf(
        "internal error adding vote %v to cache for state %v: %w",
        vote.ID(),
        vote.Identifier,
        (*vote).Identity(),
        (*vote).Source(),
        err,
    )
}

@ -143,8 +144,8 @@ func (m *VoteCollector[StateT, VoteT]) AddVote(vote *VoteT) error {
    }
    return fmt.Errorf(
        "internal error processing vote %v for state %v: %w",
        vote.ID(),
        vote.Identifier,
        (*vote).Identity(),
        (*vote).Source(),
        err,
    )
}

@ -159,7 +160,7 @@ func (m *VoteCollector[StateT, VoteT]) processVote(vote *VoteT) error {
    currentState := processor.Status()
    err := processor.Process(vote)
    if err != nil {
        if invalidVoteErr, ok := models.AsInvalidVoteError(err); ok {
        if invalidVoteErr, ok := models.AsInvalidVoteError[VoteT](err); ok {
            m.notifier.OnInvalidVoteDetected(*invalidVoteErr)
            return nil
        }

@ -168,7 +169,7 @@ func (m *VoteCollector[StateT, VoteT]) processVote(vote *VoteT) error {
        // double voting. This scenario is possible if the leader submits their vote
        // in addition to the vote in the proposal.
        if models.IsDuplicatedSignerError(err) {
            m.tracer.Trace(fmt.Sprintf("duplicated signer %x", vote.SignerID))
            m.tracer.Trace(fmt.Sprintf("duplicated signer %x", (*vote).Identity()))
            return nil
        }
        return err

@ -207,7 +208,9 @@ func (m *VoteCollector[StateT, VoteT]) Rank() uint64 {
// CachingVotes   -> VerifyingVotes
// CachingVotes   -> Invalid
// VerifyingVotes -> Invalid
func (m *VoteCollector[StateT, VoteT]) ProcessState(proposal *models.SignedProposal) error {
func (m *VoteCollector[StateT, VoteT]) ProcessState(
    proposal *models.SignedProposal[StateT, VoteT],
) error {

    if proposal.State.Rank != m.Rank() {
        return fmt.Errorf(

@ -240,9 +243,7 @@ func (m *VoteCollector[StateT, VoteT]) ProcessState(proposal *models.SignedPropo
        )
    }

    m.log.Info().
        Hex("state_id", proposal.State.Identifier[:]).
        Msg("vote collector status changed from caching to verifying")
    m.tracer.Trace("vote collector status changed from caching to verifying")

    m.processCachedVotes(proposal.State)

@ -251,7 +252,7 @@ func (m *VoteCollector[StateT, VoteT]) ProcessState(proposal *models.SignedPropo
    // Note: proposal equivocation is handled by consensus.Forks, so we don't
    // have to do anything else here.
    case consensus.VoteCollectorStatusVerifying:
        verifyingProc, ok := proc.(consensus.VerifyingVoteProcessor)
        verifyingProc, ok := proc.(consensus.VerifyingVoteProcessor[StateT, VoteT])
        if !ok {
            return fmt.Errorf(
                "while processing state %v, found that VoteProcessor reports status %s but has an incompatible implementation type %T",

@ -296,25 +297,33 @@ func (m *VoteCollector[StateT, VoteT]) RegisterVoteConsumer(
// `VoteCollectorStatusCaching` and replaces it by a newly-created
// VerifyingVoteProcessor.
// Error returns:
// * ErrDifferentCollectorState if the VoteCollector's state is _not_
//   `CachingVotes`
// * all other errors are unexpected and potential symptoms of internal bugs or
//   state corruption (fatal)
// - ErrDifferentCollectorState if the VoteCollector's state is _not_
//   `CachingVotes`
// - all other errors are unexpected and potential symptoms of internal bugs
//   or state corruption (fatal)
func (m *VoteCollector[StateT, VoteT]) caching2Verifying(
    proposal *models.SignedProposal[StateT, VoteT],
) error {
    stateID := proposal.State.Identifier
    newProc, err := m.createVerifyingProcessor(m.log, proposal)
    newProc, err := m.createVerifyingProcessor(m.tracer, proposal)
    if err != nil {
        return fmt.Errorf("failed to create VerifyingVoteProcessor for state %v: %w", stateID, err)
        return fmt.Errorf(
            "failed to create VerifyingVoteProcessor for state %v: %w",
            stateID,
            err,
        )
    }
    newProcWrapper := &atomicValueWrapper{processor: newProc}
    newProcWrapper := &atomicValueWrapper[VoteT]{processor: newProc}

    m.Lock()
    defer m.Unlock()
    proc := m.atomicLoadProcessor()
    if proc.Status() != consensus.VoteCollectorStatusCaching {
        return fmt.Errorf("processors's current state is %s: %w", proc.Status().String(), ErrDifferentCollectorState)
        return fmt.Errorf(
            "processor's current state is %s: %w",
            proc.Status().String(),
            ErrDifferentCollectorState,
        )
    }
    m.votesProcessor.Store(newProcWrapper)
    return nil

@ -324,8 +333,8 @@ func (m *VoteCollector[StateT, VoteT]) terminateVoteProcessing() {
    if m.Status() == consensus.VoteCollectorStatusInvalid {
        return
    }
    newProcWrapper := &atomicValueWrapper{
        processor: NewNoopCollector(consensus.VoteCollectorStatusInvalid),
    newProcWrapper := &atomicValueWrapper[VoteT]{
        processor: NewNoopCollector[VoteT](consensus.VoteCollectorStatusInvalid),
    }

    m.Lock()

@ -334,11 +343,13 @@ func (m *VoteCollector[StateT, VoteT]) terminateVoteProcessing() {
}

// processCachedVotes feeds all cached votes into the VoteProcessor
func (m *VoteCollector[StateT, VoteT]) processCachedVotes(state *models.State) {
func (m *VoteCollector[StateT, VoteT]) processCachedVotes(
    state *models.State[StateT],
) {
    cachedVotes := m.votesCache.All()
    m.log.Info().Msgf("processing %d cached votes", len(cachedVotes))
    m.tracer.Trace(fmt.Sprintf("processing %d cached votes", len(cachedVotes)))
    for _, vote := range cachedVotes {
        if vote.Identifier != state.Identifier {
        if (*vote).Source() != state.Identifier {
            continue
        }

@ -346,7 +357,7 @@ func (m *VoteCollector[StateT, VoteT]) processCachedVotes(state *models.State) {
        voteProcessingTask := func() {
            err := m.processVote(stateVote)
            if err != nil {
                m.log.Fatal().Err(err).Msg("internal error processing cached vote")
                m.tracer.Error("internal error processing cached vote", err)
            }
        }
        m.workers.Submit(voteProcessingTask)

@ -17,8 +17,8 @@ var (

// voteContainer stores the vote and an index representing
// the order in which the votes were received
type voteContainer struct {
    *models.Vote
type voteContainer[VoteT models.Unique] struct {
    Vote  *VoteT
    index int
}

@ -33,7 +33,7 @@ type voteContainer struct {
type VotesCache[VoteT models.Unique] struct {
    lock          sync.RWMutex
    rank          uint64
    votes         map[models.Identity]voteContainer // signerID -> first vote
    votes         map[models.Identity]voteContainer[VoteT] // signerID -> first vote
    voteConsumers []consensus.VoteConsumer[VoteT]
}

@ -41,7 +41,7 @@ type VotesCache[VoteT models.Unique] struct {
func NewVotesCache[VoteT models.Unique](rank uint64) *VotesCache[VoteT] {
    return &VotesCache[VoteT]{
        rank:  rank,
        votes: make(map[models.Identity]voteContainer),
        votes: make(map[models.Identity]voteContainer[VoteT]),
    }
}

@ -85,7 +85,7 @@ func (vc *VotesCache[VoteT]) AddVote(vote *VoteT) error {
    }

    // previously unknown vote: (1) store and (2) forward to consumers
    vc.votes[(*vote).Identity()] = voteContainer{vote, len(vc.votes)}
    vc.votes[(*vote).Identity()] = voteContainer[VoteT]{vote, len(vc.votes)}
    for _, consumer := range vc.voteConsumers {
        consumer(vote)
    }

@ -39,6 +39,7 @@ func (f *provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]) Create(
    state *models.State[StateT],
    dsTag []byte,
    aggregator consensus.SignatureAggregator,
    votingProvider consensus.VotingProvider[StateT, VoteT, PeerIDT],
) (consensus.VerifyingVoteProcessor[StateT, VoteT], error) {
    allParticipants, err := f.committee.IdentitiesByState(state.Identifier)
    if err != nil {

@ -81,6 +82,7 @@ func (f *provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]) Create(
        tracer:            tracer,
        state:             state,
        provingSigAggtor:  provingSigAggtor,
        votingProvider:    votingProvider,
        onQCCreated:       f.onQCCreated,
        minRequiredWeight: minRequiredWeight,
        done:              *atomic.NewBool(false),

@ -92,7 +94,7 @@ func (f *provingVoteProcessorFactoryBase[StateT, VoteT, PeerIDT]) Create(

// VoteProcessor implements the consensus.VerifyingVoteProcessor interface.
// It processes hotstuff votes from a collector cluster, where participants vote
// in favour of a state by proving their proving key signature.
// Concurrency safe.
type VoteProcessor[
    StateT models.Unique,