ceremonyclient/node/consensus/app/factory.go
Cassandra Heart 53f7c2b5c9
v2.1.0.2 (#442)
* v2.1.0.2

* restore tweaks to simlibp2p

* fix: nil ref on size calc

* fix: panic should induce shutdown from event_distributor

* fix: friendlier initialization that requires less manual kickstarting for test/devnets

* fix: fewer available shards than provers should choose shard length

* fix: update stored worker registry, improve logging for debug mode

* fix: shut the fuck up, peer log

* qol: log value should be snake cased

* fix: non-archive snap sync issues

* fix: separate X448/Decaf448 signed keys, add onion key to registry

* fix: overflow arithmetic on frame number comparison

* fix: worker registration should be idempotent if inputs are same, otherwise permit updated records

* fix: remove global prover state from size calculation

* fix: divide by zero case

* fix: eager prover

* fix: broadcast listener default

* qol: diagnostic data for peer authenticator

* fix: master/worker connectivity issue in sparse networks

tight coupling of peer and workers can sometimes interfere if mesh is sparse, so give workers a pseudoidentity but publish messages with the proper peer key

* fix: reorder steps of join creation

* fix: join verify frame source + ensure domain is properly padded (unnecessary but good for consistency)

* fix: add delegate to protobuf <-> reified join conversion

* fix: preempt prover from planning with no workers

* fix: use the unallocated workers to generate a proof

* qol: underflow causes join fail in first ten frames on test/devnets

* qol: small logging tweaks for easier log correlation in debug mode

* qol: use fisher-yates shuffle to ensure prover allocations are evenly distributed when scores are equal

* qol: separate decisional logic on post-enrollment confirmation into consensus engine, proposer, and worker manager where relevant, refactor out scoring

* reuse shard descriptors for both join planning and confirm/reject decisions

* fix: add missing interface method and amend test blossomsub to use new peer id basis

* fix: only check allocations if they exist

* fix: pomw mint proof data needs to be hierarchically under global intrinsic domain

* staging temporary state under diagnostics

* fix: first phase of distributed lock refactoring

* fix: compute intrinsic locking

* fix: hypergraph intrinsic locking

* fix: token intrinsic locking

* fix: update execution engines to support new locking model

* fix: adjust tests with new execution shape

* fix: weave in lock/unlock semantics to liveness provider

* fix: lock fallthrough, add missing allocation update

* qol: additional logging for diagnostics, also testnet/devnet handling for confirmations

* fix: establish grace period on halt scenario to permit recovery

* fix: support test/devnet defaults for coverage scenarios

* fix: nil ref on consensus halts for non-archive nodes

* fix: remove unnecessary prefix from prover ref

* add test coverage for fork choice behaviors and replay – once passing, blocker (2) is resolved

* fix: no fork replay on repeat for non-archive nodes, snap now behaves correctly

* rollup of pre-liveness check lock interactions

* ahead of tests, get the protobuf/metrics-related changes out so teams can prepare

* add test coverage for distributed lock behaviors – once passing, blocker (3) is resolved

* fix: blocker (3)

* Dev docs improvements (#445)

* Make install deps script more robust

* Improve testing instructions

* Worker node should stop upon OS SIGINT/SIGTERM signal (#447)

* move pebble close to Stop()

* move deferred Stop() to Start()

* add core id to worker stop log message

* create done os signal channel and stop worker upon message to it

---------

Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

---------

Co-authored-by: Daz <daz_the_corgi@proton.me>
Co-authored-by: Black Swan <3999712+blacks1ne@users.noreply.github.com>
2025-10-23 01:03:06 -05:00

189 lines
5.9 KiB
Go

package app
import (
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"source.quilibrium.com/quilibrium/monorepo/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/events"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
qp2p "source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/types/channel"
"source.quilibrium.com/quilibrium/monorepo/types/compiler"
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/keys"
"source.quilibrium.com/quilibrium/monorepo/types/p2p"
"source.quilibrium.com/quilibrium/monorepo/types/store"
)
// AppConsensusEngineFactory provides a factory method for creating properly
// wired AppConsensusEngine instances with time reels and event distributors.
//
// All dependencies are injected once at construction and shared by every
// engine the factory creates; the factory itself holds no mutable state.
type AppConsensusEngineFactory struct {
logger *zap.Logger
config *config.Config
pubsub p2p.PubSub
hypergraph hypergraph.Hypergraph
keyManager keys.KeyManager
// Persistence layers handed through to each created engine.
keyStore store.KeyStore
clockStore store.ClockStore
inboxStore store.InboxStore
shardsStore store.ShardsStore
hypergraphStore store.HypergraphStore
// Cryptographic provers/constructors used during frame production.
frameProver crypto.FrameProver
inclusionProver crypto.InclusionProver
bulletproofProver crypto.BulletproofProver
verEnc crypto.VerifiableEncryptor
decafConstructor crypto.DecafConstructor
compiler compiler.CircuitCompiler
signerRegistry consensus.SignerRegistry
proverRegistry consensus.ProverRegistry
// peerInfoManager is forwarded to NewAppConsensusEngine in
// CreateAppConsensusEngine.
peerInfoManager qp2p.PeerInfoManager
dynamicFeeManager consensus.DynamicFeeManager
frameValidator consensus.AppFrameValidator
globalFrameValidator consensus.GlobalFrameValidator
difficultyAdjuster consensus.DifficultyAdjuster
rewardIssuance consensus.RewardIssuance
blsConstructor crypto.BlsConstructor
encryptedChannel channel.EncryptedChannel
}
// NewAppConsensusEngineFactory creates a new factory for consensus engines.
//
// Every dependency is captured on the returned factory and reused for each
// engine produced by CreateAppConsensusEngine. All arguments are required;
// the factory performs no validation and assumes non-nil inputs.
func NewAppConsensusEngineFactory(
	logger *zap.Logger,
	config *config.Config,
	pubsub p2p.PubSub,
	hypergraph hypergraph.Hypergraph,
	keyManager keys.KeyManager,
	keyStore store.KeyStore,
	clockStore store.ClockStore,
	inboxStore store.InboxStore,
	shardsStore store.ShardsStore,
	hypergraphStore store.HypergraphStore,
	frameProver crypto.FrameProver,
	inclusionProver crypto.InclusionProver,
	bulletproofProver crypto.BulletproofProver,
	verEnc crypto.VerifiableEncryptor,
	decafConstructor crypto.DecafConstructor,
	compiler compiler.CircuitCompiler,
	signerRegistry consensus.SignerRegistry,
	proverRegistry consensus.ProverRegistry,
	peerInfoManager qp2p.PeerInfoManager,
	dynamicFeeManager consensus.DynamicFeeManager,
	frameValidator consensus.AppFrameValidator,
	globalFrameValidator consensus.GlobalFrameValidator,
	difficultyAdjuster consensus.DifficultyAdjuster,
	rewardIssuance consensus.RewardIssuance,
	blsConstructor crypto.BlsConstructor,
	encryptedChannel channel.EncryptedChannel,
) *AppConsensusEngineFactory {
	return &AppConsensusEngineFactory{
		logger:            logger,
		config:            config,
		pubsub:            pubsub,
		hypergraph:        hypergraph,
		keyManager:        keyManager,
		keyStore:          keyStore,
		clockStore:        clockStore,
		inboxStore:        inboxStore,
		shardsStore:       shardsStore,
		hypergraphStore:   hypergraphStore,
		frameProver:       frameProver,
		inclusionProver:   inclusionProver,
		bulletproofProver: bulletproofProver,
		verEnc:            verEnc,
		decafConstructor:  decafConstructor,
		compiler:          compiler,
		signerRegistry:    signerRegistry,
		proverRegistry:    proverRegistry,
		// BUG FIX: peerInfoManager was previously accepted but never
		// assigned, leaving f.peerInfoManager nil when forwarded to
		// NewAppConsensusEngine in CreateAppConsensusEngine.
		peerInfoManager:      peerInfoManager,
		dynamicFeeManager:    dynamicFeeManager,
		frameValidator:       frameValidator,
		globalFrameValidator: globalFrameValidator,
		difficultyAdjuster:   difficultyAdjuster,
		rewardIssuance:       rewardIssuance,
		blsConstructor:       blsConstructor,
		encryptedChannel:     encryptedChannel,
	}
}
// CreateAppConsensusEngine creates a new AppConsensusEngine.
//
// It builds a shard-scoped app time reel for appAddress, wires an event
// distributor over both the global and app time reel event channels, and
// constructs the engine from the factory's captured dependencies.
func (f *AppConsensusEngineFactory) CreateAppConsensusEngine(
	appAddress []byte,
	coreId uint,
	globalTimeReel *time.GlobalTimeReel,
	grpcServer *grpc.Server,
) (*AppConsensusEngine, error) {
	// Shard-local time reel keyed by the app address.
	shardTimeReel, err := time.NewAppTimeReel(
		f.logger,
		appAddress,
		f.proverRegistry,
		f.clockStore,
		f.config.Engine.ArchiveMode,
	)
	if err != nil {
		return nil, errors.Wrap(err, "create app time reel")
	}

	// Wire the engine with a distributor fed by both time reels' events.
	consensusEngine, err := NewAppConsensusEngine(
		f.logger,
		f.config,
		coreId,
		appAddress,
		f.pubsub,
		f.hypergraph,
		f.keyManager,
		f.keyStore,
		f.clockStore,
		f.inboxStore,
		f.shardsStore,
		f.hypergraphStore,
		f.frameProver,
		f.inclusionProver,
		f.bulletproofProver,
		f.verEnc,
		f.decafConstructor,
		f.compiler,
		f.signerRegistry,
		f.proverRegistry,
		f.dynamicFeeManager,
		f.frameValidator,
		f.globalFrameValidator,
		f.difficultyAdjuster,
		f.rewardIssuance,
		events.NewAppEventDistributor(
			globalTimeReel.GetEventCh(),
			shardTimeReel.GetEventCh(),
		),
		f.peerInfoManager,
		shardTimeReel,
		globalTimeReel,
		f.blsConstructor,
		f.encryptedChannel,
		grpcServer,
	)
	if err != nil {
		return nil, errors.Wrap(err, "create app consensus engine")
	}

	return consensusEngine, nil
}
// CreateGlobalTimeReel creates a new global time reel backed by the
// factory's prover registry and clock store, honoring the configured
// network and archive-mode settings.
func (f *AppConsensusEngineFactory) CreateGlobalTimeReel() (
	*time.GlobalTimeReel,
	error,
) {
	network := f.config.P2P.Network
	archive := f.config.Engine.ArchiveMode
	return time.NewGlobalTimeReel(
		f.logger,
		f.proverRegistry,
		f.clockStore,
		network,
		archive,
	)
}