package app

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"

	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
	"source.quilibrium.com/quilibrium/monorepo/rpm"
	"source.quilibrium.com/quilibrium/monorepo/types/p2p"
)

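// subscribeToConsensusMessages initializes the RPM mixnet, subscribes to the
// consensus message bitmask, and registers its validator. Received messages
// are queued for asynchronous processing; messages are dropped when the
// queue is full.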
func (e *AppConsensusEngine) subscribeToConsensusMessages() error {
	// Only the prover key itself is needed to construct the mixnet.
	proverKey, _, _, _ := e.GetProvingKey(e.config.Engine)
	e.mixnet = rpm.NewRPMMixnet(
		e.logger,
		proverKey,
		e.proverRegistry,
		e.appAddress,
	)

	if err := e.pubsub.Subscribe(
		e.getConsensusMessageBitmask(),
		func(message *pb.Message) error {
			select {
			case <-e.haltCtx.Done():
				return nil
			case e.consensusMessageQueue <- message:
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("consensus message queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to consensus messages")
	}

	// Register consensus message validator
	if err := e.pubsub.RegisterValidator(
		e.getConsensusMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateConsensusMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register consensus message validator")
	}

	return nil
}

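// subscribeToGlobalProverMessages subscribes to the global prover message
// bitmask and registers its validator. Messages are not handled locally.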
func (e *AppConsensusEngine) subscribeToGlobalProverMessages() error {
	if err := e.pubsub.Subscribe(
		e.getGlobalProverMessageBitmask(),
		func(message *pb.Message) error {
			// No local handling; messages on this topic are only validated.
			return nil
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to global prover messages")
	}

	// Register global prover message validator
	if err := e.pubsub.RegisterValidator(
		e.getGlobalProverMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateGlobalProverMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register global prover message validator")
	}

	return nil
}

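// subscribeToProverMessages subscribes to the prover message bitmask and
// registers its validator, queueing received messages for processing.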
func (e *AppConsensusEngine) subscribeToProverMessages() error {
	if err := e.pubsub.Subscribe(
		e.getProverMessageBitmask(),
		func(message *pb.Message) error {
			select {
			case <-e.haltCtx.Done():
				return nil
			case e.proverMessageQueue <- message:
				e.logger.Debug("got prover message")
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("prover message queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to prover messages")
	}

	// Register prover message validator
	if err := e.pubsub.RegisterValidator(
		e.getProverMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateProverMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register prover message validator")
	}

	return nil
}

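// subscribeToFrameMessages subscribes to the frame message bitmask and
// registers its validator. Nodes already present in the prover trie do not
// queue these messages.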
func (e *AppConsensusEngine) subscribeToFrameMessages() error {
	if err := e.pubsub.Subscribe(
		e.getFrameMessageBitmask(),
		func(message *pb.Message) error {
			// Skip queueing when this node is in the prover trie.
			if e.IsInProverTrie(e.getProverAddress()) {
				return nil
			}

			select {
			case <-e.haltCtx.Done():
				return nil
			case e.frameMessageQueue <- message:
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("frame message queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to frame messages")
	}

	// Register frame message validator
	if err := e.pubsub.RegisterValidator(
		e.getFrameMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateFrameMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register frame message validator")
	}

	return nil
}

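// subscribeToGlobalFrameMessages subscribes to the global frame message
// bitmask and registers its validator, queueing received messages.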
func (e *AppConsensusEngine) subscribeToGlobalFrameMessages() error {
	if err := e.pubsub.Subscribe(
		e.getGlobalFrameMessageBitmask(),
		func(message *pb.Message) error {
			select {
			case <-e.haltCtx.Done():
				return nil
			case e.globalFrameMessageQueue <- message:
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("global message queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to global frame messages")
	}

	// Register global frame message validator
	if err := e.pubsub.RegisterValidator(
		e.getGlobalFrameMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateGlobalFrameMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register global frame message validator")
	}

	return nil
}

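// subscribeToGlobalAlertMessages subscribes to the global alert message
// bitmask and registers its validator, queueing received alerts.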
func (e *AppConsensusEngine) subscribeToGlobalAlertMessages() error {
	if err := e.pubsub.Subscribe(
		e.getGlobalAlertMessageBitmask(),
		func(message *pb.Message) error {
			select {
			case e.globalAlertMessageQueue <- message:
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("global alert queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to global alert messages")
	}

	// Register alert validator
	if err := e.pubsub.RegisterValidator(
		e.getGlobalAlertMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateAlertMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register global alert validator")
	}

	return nil
}

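// subscribeToPeerInfoMessages subscribes to the global peer info message
// bitmask and registers its validator, queueing received messages.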
func (e *AppConsensusEngine) subscribeToPeerInfoMessages() error {
	if err := e.pubsub.Subscribe(
		e.getGlobalPeerInfoMessageBitmask(),
		func(message *pb.Message) error {
			select {
			case <-e.haltCtx.Done():
				return nil
			case e.globalPeerInfoMessageQueue <- message:
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("peer info message queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to peer info messages")
	}

	// Register peer info validator
	if err := e.pubsub.RegisterValidator(
		e.getGlobalPeerInfoMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validatePeerInfoMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register peer info validator")
	}

	return nil
}

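// subscribeToDispatchMessages subscribes to the dispatch message bitmask and
// registers its validator, queueing received messages.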
func (e *AppConsensusEngine) subscribeToDispatchMessages() error {
	if err := e.pubsub.Subscribe(
		e.getDispatchMessageBitmask(),
		func(message *pb.Message) error {
			select {
			case e.dispatchMessageQueue <- message:
				return nil
			case <-e.ShutdownSignal():
				return errors.New("context cancelled")
			default:
				e.logger.Warn("dispatch queue full, dropping message")
				return nil
			}
		},
	); err != nil {
		return errors.Wrap(err, "subscribe to dispatch messages")
	}

	// Register dispatch validator
	if err := e.pubsub.RegisterValidator(
		e.getDispatchMessageBitmask(),
		func(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
			return e.validateDispatchMessage(peerID, message)
		},
		true,
	); err != nil {
		return errors.Wrap(err, "register dispatch validator")
	}

	return nil
}