mirror of
https://github.com/QuilibriumNetwork/ceremonyclient.git
synced 2026-03-05 08:17:39 +08:00
* v2.1.0.19 * enhanced error logging, fix seniority marker join blocker, fix sync message size limit defaults * resolve signature failure * additional error logging for merge-related signatures * fix: one-shot sync message size, app shard TC signature size, collector/hotstuff race condition, expired joins blocking new joins due to pruning disable * remove compat with old 2.0.0 blossomsub * fix: resolve abandoned prover joins * reload prover registry * fix stale worker proposal edge * add full sanity check on join before submitting to identify bug * resolve non-fallthrough condition that should be fallthrough * fix: resolve rare SIGFPE, fix orphan expired joins blocking workers from reallocating * add reconnect fallback if no peers are found with variable reconnect time (#511) Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com> * update base peer count to 1 (#513) * fix: expired prover join frames, starting port ranges, proposer getting stuck, and seniority on joins * fix: panic on shutdown, libp2p discovery picking inaccessible peers, coverage event check not in shutdown logic, amend app shard worker behavior to mirror global for prover root reconciliation * fix: shutdown scenario quirks, reload hanging * fix: do not bailout early on shutdown of coverage check * fix: force registry refresh on worker waiting for registration * add more logging to wait for prover * fix: worker manager refreshes the filter on allocation, snapshots blocking close on shutdown * tweak: force shutdown after five seconds for app worker * fix: don't loop when shutting down * fix: slight reordering, also added named workers to trace hanging shutdowns * use deterministic key for peer id of workers to stop flagging workers as sybil attacks * fix: remove pubsub stop from app consensus engine as it shouldn't manage pubsub lifecycle, integrate shutdown context to PerformSync to prevent stuck syncs from halting respawn * fix: blossomsub pubsub interface does not properly 
track subscription status * fix: subscribe order to avoid nil panic * switch from dnsaddr to dns4 * add missing quic-v1 * additional logging to isolate respawn quirks * fix: dnsaddr -> dns4 for blossomsub * sort-of fix: apply sledgehammer to restart logic * fix: restore proper respawn logic, fix frozen hypergraph post respawn, unsubscribe from bitmask previously missing --------- Co-authored-by: winged-pegasus <55340199+winged-pegasus@users.noreply.github.com> Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
148 lines
3.6 KiB
Go
148 lines
3.6 KiB
Go
package validator
|
|
|
|
import (
|
|
"bytes"
|
|
"encoding/hex"
|
|
"slices"
|
|
|
|
"github.com/pkg/errors"
|
|
"go.uber.org/zap"
|
|
"source.quilibrium.com/quilibrium/monorepo/protobufs"
|
|
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
|
|
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
|
)
|
|
|
|
// BLSGlobalFrameValidator validates global frames by checking structural
// invariants of the frame header and verifying its aggregated BLS48-581
// signature against the registry's active prover set.
type BLSGlobalFrameValidator struct {
	// proverRegistry supplies the current set of active provers whose keys
	// are re-aggregated during validation.
	proverRegistry consensus.ProverRegistry
	// blsConstructor performs BLS key/signature aggregation.
	blsConstructor crypto.BlsConstructor
	// frameProver verifies global frame headers and reports the signer
	// bitmask indices.
	frameProver crypto.FrameProver
	// logger records validation diagnostics.
	logger *zap.Logger
}
|
|
|
|
func NewBLSGlobalFrameValidator(
|
|
proverRegistry consensus.ProverRegistry,
|
|
blsConstructor crypto.BlsConstructor,
|
|
frameProver crypto.FrameProver,
|
|
logger *zap.Logger,
|
|
) *BLSGlobalFrameValidator {
|
|
return &BLSGlobalFrameValidator{
|
|
proverRegistry: proverRegistry,
|
|
blsConstructor: blsConstructor,
|
|
frameProver: frameProver,
|
|
logger: logger,
|
|
}
|
|
}
|
|
|
|
// Validate implements consensus.GlobalFrameValidator.
|
|
func (b *BLSGlobalFrameValidator) Validate(
|
|
frame *protobufs.GlobalFrame,
|
|
) (bool, error) {
|
|
if frame == nil || frame.Header == nil {
|
|
return false, errors.New("frame or header is nil")
|
|
}
|
|
|
|
if len(frame.Header.Output) != 516 {
|
|
return false, errors.Errorf(
|
|
"invalid output length: %d",
|
|
len(frame.Header.Output),
|
|
)
|
|
}
|
|
|
|
if frame.Header.FrameNumber == 0 {
|
|
b.logger.Debug("validating genesis frame - no signature required")
|
|
return true, nil
|
|
}
|
|
|
|
if frame.Header.PublicKeySignatureBls48581 == nil {
|
|
return false, errors.New("no bls signature")
|
|
}
|
|
|
|
sig := frame.Header.PublicKeySignatureBls48581
|
|
if sig.Signature == nil || sig.PublicKey == nil {
|
|
return false, errors.New("signature or public key is nil")
|
|
}
|
|
|
|
if sig.Bitmask == nil {
|
|
return false, errors.New("bitmask is nil")
|
|
}
|
|
|
|
bits, err := b.frameProver.VerifyGlobalFrameHeader(
|
|
frame.Header,
|
|
b.blsConstructor,
|
|
)
|
|
isValid := err == nil
|
|
|
|
if !isValid {
|
|
b.logger.Debug(
|
|
"frame verification failed",
|
|
zap.Error(err),
|
|
zap.Uint64("frame_number", frame.Header.FrameNumber),
|
|
zap.String(
|
|
"parent_selector",
|
|
hex.EncodeToString(frame.Header.ParentSelector),
|
|
),
|
|
)
|
|
return false, errors.Wrap(err, "global frame header verification")
|
|
}
|
|
|
|
provers, err := b.proverRegistry.GetActiveProvers(nil)
|
|
if err != nil {
|
|
b.logger.Error("could not get active provers", zap.Error(err))
|
|
return false, errors.Wrap(err, "validate")
|
|
}
|
|
|
|
activeProverSet := [][]byte{}
|
|
throwawaySet := [][]byte{}
|
|
for i, prover := range provers {
|
|
if slices.Contains(bits, uint8(i)) {
|
|
info := prover
|
|
activeProverSet = append(activeProverSet, info.PublicKey)
|
|
throwawaySet = append(
|
|
throwawaySet,
|
|
frame.Header.PublicKeySignatureBls48581.Signature,
|
|
)
|
|
}
|
|
}
|
|
|
|
aggregate, err := b.blsConstructor.Aggregate(activeProverSet, throwawaySet)
|
|
if err != nil {
|
|
b.logger.Error("could not aggregate keys", zap.Error(err))
|
|
return false, errors.Wrap(err, "validate")
|
|
}
|
|
|
|
if !bytes.Equal(
|
|
aggregate.GetAggregatePublicKey(),
|
|
frame.Header.PublicKeySignatureBls48581.PublicKey.KeyValue,
|
|
) {
|
|
b.logger.Error(
|
|
"could not verify aggregated keys",
|
|
zap.String("expected_key", hex.EncodeToString(
|
|
frame.Header.PublicKeySignatureBls48581.PublicKey.KeyValue,
|
|
)),
|
|
zap.String("actual_key", hex.EncodeToString(
|
|
aggregate.GetAggregatePublicKey(),
|
|
)),
|
|
zap.Error(err),
|
|
)
|
|
return false, errors.Wrap(
|
|
errors.New("could not verify aggregated keys"),
|
|
"validate",
|
|
)
|
|
}
|
|
|
|
b.logger.Debug(
|
|
"frame verification result",
|
|
zap.Bool("is_valid", isValid),
|
|
zap.Error(err),
|
|
zap.Uint64("frame_number", frame.Header.FrameNumber),
|
|
zap.String(
|
|
"parent_selector",
|
|
hex.EncodeToString(frame.Header.ParentSelector),
|
|
),
|
|
)
|
|
|
|
return isValid, err
|
|
}
|
|
|
|
// Compile-time check that BLSGlobalFrameValidator satisfies
// consensus.GlobalFrameValidator.
var _ consensus.GlobalFrameValidator = (*BLSGlobalFrameValidator)(nil)
|