Mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git, synced 2026-02-21 10:27:26 +08:00
fix: seniority calculation, sync race condition, migration
This commit is contained in: parent a87be81113, commit 2fd9ccaa63
@ -161,8 +161,9 @@ func (h *snapshotHandle) isLeafMiss(key []byte) bool {
// snapshotGeneration represents a set of shard snapshots for a specific
// commit root.
type snapshotGeneration struct {
root []byte
handles map[string]*snapshotHandle // keyed by shard key
root []byte
handles map[string]*snapshotHandle // keyed by shard key
dbSnapshot tries.DBSnapshot // point-in-time DB snapshot taken at publish
}

type snapshotManager struct {
@ -211,6 +212,22 @@ func (m *snapshotManager) publish(root []byte) {
newGen.root = append([]byte{}, root...)
}

// Take a point-in-time DB snapshot if the store supports it.
// This ensures all shard snapshots for this generation reflect
// the exact state at publish time, avoiding race conditions.
if m.store != nil {
dbSnap, err := m.store.NewDBSnapshot()
if err != nil {
m.logger.Warn(
"failed to create DB snapshot for generation",
zap.String("root", rootHex),
zap.Error(err),
)
} else {
newGen.dbSnapshot = dbSnap
}
}

// Prepend the new generation (newest first)
m.generations = append([]*snapshotGeneration{newGen}, m.generations...)

@ -227,6 +244,16 @@ func (m *snapshotManager) publish(root []byte) {
}
}

// Close the DB snapshot if present
if oldGen.dbSnapshot != nil {
if err := oldGen.dbSnapshot.Close(); err != nil {
m.logger.Warn(
"failed to close DB snapshot",
zap.Error(err),
)
}
}

oldRootHex := ""
if len(oldGen.root) != 0 {
oldRootHex = hex.EncodeToString(oldGen.root)
@ -247,10 +274,11 @@ func (m *snapshotManager) publish(root []byte) {
// acquire returns a snapshot handle for the given shard key. If expectedRoot
// is provided and a matching generation has an existing snapshot for this shard,
// that snapshot is returned. Otherwise, a new snapshot is created from the
// current DB state and associated with the latest generation.
// generation's DB snapshot (if available) to ensure consistency.
//
// Note: Historical snapshots are only available if they were created while that
// generation was current. We cannot create a snapshot of past state retroactively.
// With DB snapshots: Historical generations can create new shard snapshots because
// the DB snapshot captures the exact state at publish time.
// Without DB snapshots (fallback): Only the latest generation can create snapshots.
func (m *snapshotManager) acquire(
shardKey tries.ShardKey,
expectedRoot []byte,
@ -264,7 +292,9 @@ func (m *snapshotManager) acquire(
return nil
}

// If expectedRoot is provided, look for an existing snapshot in that generation
var targetGen *snapshotGeneration

// If expectedRoot is provided, look for the matching generation
if len(expectedRoot) > 0 {
for _, gen := range m.generations {
if bytes.Equal(gen.root, expectedRoot) {
@ -278,24 +308,33 @@ func (m *snapshotManager) acquire(
return handle
}
// Generation exists but no snapshot for this shard yet.
// Only create if this is the latest generation (current DB state matches).
// If we have a DB snapshot, we can create from it even for older generations.
if gen.dbSnapshot != nil {
targetGen = gen
m.logger.Debug(
"creating snapshot for expected root from DB snapshot",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
break
}
// No DB snapshot - only allow if this is the latest generation
if gen != m.generations[0] {
m.logger.Warn(
"generation matches expected root but is not latest, cannot create snapshot",
"generation matches expected root but has no DB snapshot and is not latest",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
return nil
}
// Fall through to create snapshot for latest generation
targetGen = gen
m.logger.Debug(
"creating snapshot for expected root (latest generation)",
"creating snapshot for expected root (latest generation, no DB snapshot)",
zap.String("expected_root", hex.EncodeToString(expectedRoot)),
)
break
}
}
// If we didn't find a matching generation at all, reject
if len(m.generations) == 0 || !bytes.Equal(m.generations[0].root, expectedRoot) {
if targetGen == nil {
if m.logger != nil {
latestRoot := ""
if len(m.generations) > 0 {
@ -309,13 +348,13 @@ func (m *snapshotManager) acquire(
}
return nil
}
} else {
// No expected root - use the latest generation
targetGen = m.generations[0]
}

// Use the latest generation for new snapshots
latestGen := m.generations[0]

// Check if we already have a handle for this shard in the latest generation
if handle, ok := latestGen.handles[key]; ok {
// Check if we already have a handle for this shard in the target generation
if handle, ok := targetGen.handles[key]; ok {
handle.acquire()
return handle
}
@ -324,7 +363,19 @@ func (m *snapshotManager) acquire(
return nil
}

storeSnapshot, release, err := m.store.NewShardSnapshot(shardKey)
// Create the shard snapshot, preferring DB snapshot if available
var storeSnapshot tries.TreeBackingStore
var release func()
var err error

if targetGen.dbSnapshot != nil {
storeSnapshot, release, err = m.store.NewShardSnapshotFromDBSnapshot(
shardKey,
targetGen.dbSnapshot,
)
} else {
storeSnapshot, release, err = m.store.NewShardSnapshot(shardKey)
}
if err != nil {
m.logger.Warn(
"failed to build shard snapshot",
@ -334,13 +385,13 @@ func (m *snapshotManager) acquire(
return nil
}

handle := newSnapshotHandle(key, storeSnapshot, release, latestGen.root)
handle := newSnapshotHandle(key, storeSnapshot, release, targetGen.root)
// Acquire a ref for the caller. The handle is created with refs=1 (the owner ref
// held by the snapshot manager), and this adds another ref for the sync session.
// This ensures publish() can release the owner ref without closing the DB while
// a sync is still using it.
handle.acquire()
latestGen.handles[key] = handle
targetGen.handles[key] = handle
return handle
}

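The race this closes: a shard snapshot requested for an older generation used to be built from the current DB state, so a sync pinned to a historical root could observe newer writes. Below is a minimal, self-contained sketch of the generation-pinned pattern; the interface shapes (DBSnapshot, the two constructor names) mirror the calls above, while the stubbed store and return types are assumptions for illustration only.

package snapshotsketch

// DBSnapshot and the two constructor names mirror the diff above; the
// concrete store and backing types are stubbed for the sketch.
type DBSnapshot interface{ Close() error }

type shardStore interface {
	NewShardSnapshot(shardKey string) (backing any, release func(), err error)
	NewShardSnapshotFromDBSnapshot(shardKey string, snap DBSnapshot) (backing any, release func(), err error)
}

type generation struct {
	root []byte
	db   DBSnapshot // captured at publish time; nil if the store cannot snapshot
}

// snapshotFor prefers the generation's pinned DB snapshot so shard snapshots
// created later still reflect the state at publish time; the fallback path is
// only safe for the latest generation, whose root matches the live DB.
func snapshotFor(store shardStore, gen *generation, shardKey string) (any, func(), error) {
	if gen.db != nil {
		return store.NewShardSnapshotFromDBSnapshot(shardKey, gen.db)
	}
	return store.NewShardSnapshot(shardKey)
}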
@ -1,6 +1,6 @@
// Code generated by Wire. DO NOT EDIT.

//go:generate go run github.com/google/wire/cmd/wire
//go:generate go run -mod=mod github.com/google/wire/cmd/wire
//go:build !wireinject
// +build !wireinject

@ -66,15 +66,13 @@ func NewDBConsole(configConfig *config.Config) (*DBConsole, error) {
}

func NewClockStore(logger *zap.Logger, configConfig *config.Config, uint2 uint) (store.ClockStore, error) {
dbConfig := configConfig.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, uint2)
pebbleDB := store2.NewPebbleDB(logger, configConfig, uint2)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
return pebbleClockStore, nil
}

func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config, coreId uint, rpcMultiaddr string, parentProcess int, configDir p2p.ConfigDir) (*DataWorkerNode, error) {
dbConfig := config2.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, coreId)
pebbleDB := store2.NewPebbleDB(logger, config2, coreId)
pebbleDataProofStore := store2.NewPebbleDataProofStore(pebbleDB, logger)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
pebbleTokenStore := store2.NewPebbleTokenStore(pebbleDB, logger)
@ -88,6 +86,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config
if err != nil {
return nil, err
}
dbConfig := config2.DB
mpCitHVerifiableEncryptor := newVerifiableEncryptor()
kzgInclusionProver := bls48581.NewKZGInclusionProver(logger)
pebbleHypergraphStore := store2.NewPebbleHypergraphStore(dbConfig, pebbleDB, logger, mpCitHVerifiableEncryptor, kzgInclusionProver)
@ -133,8 +132,7 @@ func NewDataWorkerNodeWithProxyPubsub(logger *zap.Logger, config2 *config.Config
}

func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Config, coreId uint, rpcMultiaddr string, parentProcess int, configDir p2p.ConfigDir) (*DataWorkerNode, error) {
dbConfig := config2.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, coreId)
pebbleDB := store2.NewPebbleDB(logger, config2, coreId)
pebbleDataProofStore := store2.NewPebbleDataProofStore(pebbleDB, logger)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
pebbleTokenStore := store2.NewPebbleTokenStore(pebbleDB, logger)
@ -148,6 +146,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
if err != nil {
return nil, err
}
dbConfig := config2.DB
mpCitHVerifiableEncryptor := newVerifiableEncryptor()
kzgInclusionProver := bls48581.NewKZGInclusionProver(logger)
pebbleHypergraphStore := store2.NewPebbleHypergraphStore(dbConfig, pebbleDB, logger, mpCitHVerifiableEncryptor, kzgInclusionProver)
@ -190,8 +189,7 @@ func NewDataWorkerNodeWithoutProxyPubsub(logger *zap.Logger, config2 *config.Con
}

func NewMasterNode(logger *zap.Logger, config2 *config.Config, coreId uint, configDir p2p.ConfigDir) (*MasterNode, error) {
dbConfig := config2.DB
pebbleDB := store2.NewPebbleDB(logger, dbConfig, coreId)
pebbleDB := store2.NewPebbleDB(logger, config2, coreId)
pebbleDataProofStore := store2.NewPebbleDataProofStore(pebbleDB, logger)
pebbleClockStore := store2.NewPebbleClockStore(pebbleDB, logger)
pebbleTokenStore := store2.NewPebbleTokenStore(pebbleDB, logger)
@ -202,6 +200,7 @@ func NewMasterNode(logger *zap.Logger, config2 *config.Config, coreId uint, conf
engineConfig := config2.Engine
blossomSub := p2p.NewBlossomSub(p2PConfig, engineConfig, logger, coreId, configDir)
inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(logger)
dbConfig := config2.DB
mpCitHVerifiableEncryptor := newVerifiableEncryptor()
kzgInclusionProver := bls48581.NewKZGInclusionProver(logger)
pebbleHypergraphStore := store2.NewPebbleHypergraphStore(dbConfig, pebbleDB, logger, mpCitHVerifiableEncryptor, kzgInclusionProver)

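The regenerated wiring now hands store2.NewPebbleDB the whole *config.Config instead of only the DB section, with dbConfig derived later where the hypergraph store still needs it. Presumably the store has to see more than the DB block, for example to decide whether to run the migration this commit mentions; that reading is an assumption. A tiny illustrative constructor shape follows (the config fields here are invented stand-ins, not the real schema):

package storesketch

import "go.uber.org/zap"

type DBConfig struct{ Path string }

// Config is a stand-in for config.Config; only the shape matters here.
type Config struct {
	DB DBConfig
}

type PebbleDB struct {
	logger *zap.Logger
	path   string
	core   uint
}

// NewPebbleDB mirrors the new call shape: the store receives the full config
// so it can consult sections beyond cfg.DB when opening the database.
func NewPebbleDB(logger *zap.Logger, cfg *Config, core uint) *PebbleDB {
	return &PebbleDB{logger: logger, path: cfg.DB.Path, core: core}
}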
@ -3306,89 +3306,40 @@ func (e *GlobalConsensusEngine) ProposeWorkerJoin(
return errors.Wrap(err, "propose worker join")
}

skipMerge := false
info, err := e.proverRegistry.GetProverInfo(e.getProverAddress())
if err == nil || info != nil {
skipMerge = true
proverExists := err == nil && info != nil

// Build merge helpers and calculate potential merge seniority
helpers, peerIds := e.buildMergeHelpers()
mergeSeniorityBI := compat.GetAggregatedSeniority(peerIds)
var mergeSeniority uint64 = 0
if mergeSeniorityBI.IsUint64() {
mergeSeniority = mergeSeniorityBI.Uint64()
}

helpers := []*global.SeniorityMerge{}
if !skipMerge {
e.logger.Debug("attempting merge")
peerIds := []string{}
oldProver, err := keys.Ed448KeyFromBytes(
[]byte(e.config.P2P.PeerPrivKey),
e.pubsub.GetPublicKey(),
)
if err != nil {
e.logger.Debug("cannot get peer key", zap.Error(err))
return errors.Wrap(err, "propose worker join")
// If prover already exists, check if we should submit a seniority merge
if proverExists {
if mergeSeniority > info.Seniority {
e.logger.Info(
"existing prover has lower seniority than merge would provide, submitting seniority merge",
zap.Uint64("existing_seniority", info.Seniority),
zap.Uint64("merge_seniority", mergeSeniority),
)
return e.submitSeniorityMerge(frame, helpers)
}
helpers = append(helpers, global.NewSeniorityMerge(
crypto.KeyTypeEd448,
oldProver,
))
peerIds = append(peerIds, peer.ID(e.pubsub.GetPeerID()).String())
if len(e.config.Engine.MultisigProverEnrollmentPaths) != 0 {
e.logger.Debug("loading old configs")
for _, conf := range e.config.Engine.MultisigProverEnrollmentPaths {
extraConf, err := config.LoadConfig(conf, "", false)
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

peerPrivKey, err := hex.DecodeString(extraConf.P2P.PeerPrivKey)
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

privKey, err := pcrypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

pub := privKey.GetPublic()
pubBytes, err := pub.Raw()
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

id, err := peer.IDFromPublicKey(pub)
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

priv, err := privKey.Raw()
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

signer, err := keys.Ed448KeyFromBytes(priv, pubBytes)
if err != nil {
e.logger.Error("could not construct join", zap.Error(err))
return errors.Wrap(err, "propose worker join")
}

peerIds = append(peerIds, id.String())
helpers = append(helpers, global.NewSeniorityMerge(
crypto.KeyTypeEd448,
signer,
))
}
}
seniorityBI := compat.GetAggregatedSeniority(peerIds)
e.logger.Info(
"existing seniority detected for proposed join",
zap.String("seniority", seniorityBI.String()),
e.logger.Debug(
"prover already exists with sufficient seniority, skipping join",
zap.Uint64("existing_seniority", info.Seniority),
zap.Uint64("merge_seniority", mergeSeniority),
)
return nil
}

e.logger.Info(
"existing seniority detected for proposed join",
zap.String("seniority", mergeSeniorityBI.String()),
)

var delegate []byte
if e.config.Engine.DelegateAddress != "" {
delegate, err = hex.DecodeString(e.config.Engine.DelegateAddress)
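The rewritten flow always computes the seniority the old peer keys would contribute before deciding anything: an existing prover with less seniority than the merge would provide triggers submitSeniorityMerge, an existing prover with enough seniority skips the join, and only a missing prover proceeds to a fresh join. A condensed, self-contained sketch of that decision follows; the two callbacks stand in for the engine methods shown above and are not the real API.

package joinsketch

// decideJoinAction condenses the branch structure introduced above.
// submitMerge and proposeJoin stand in for submitSeniorityMerge and the
// normal join proposal path.
func decideJoinAction(
	proverExists bool,
	existingSeniority uint64,
	mergeSeniority uint64,
	submitMerge func() error,
	proposeJoin func() error,
) error {
	if proverExists {
		if mergeSeniority > existingSeniority {
			// Under-credited prover: repair seniority with a merge request.
			return submitMerge()
		}
		// Already enrolled with at least as much seniority: nothing to do.
		return nil
	}
	// Not enrolled yet: proceed with the normal join, which carries the
	// merge helpers so the claimed seniority is included.
	return proposeJoin()
}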
@ -3522,6 +3473,149 @@ func (e *GlobalConsensusEngine) ProposeWorkerJoin(
return nil
}

// buildMergeHelpers constructs the seniority merge helpers from the current
// peer key and any configured multisig prover enrollment paths.
func (e *GlobalConsensusEngine) buildMergeHelpers() ([]*global.SeniorityMerge, []string) {
helpers := []*global.SeniorityMerge{}
peerIds := []string{}

peerPrivKey, err := hex.DecodeString(e.config.P2P.PeerPrivKey)
if err != nil {
e.logger.Debug("cannot decode peer key for merge helpers", zap.Error(err))
return helpers, peerIds
}

oldProver, err := keys.Ed448KeyFromBytes(
peerPrivKey,
e.pubsub.GetPublicKey(),
)
if err != nil {
e.logger.Debug("cannot get peer key for merge helpers", zap.Error(err))
return helpers, peerIds
}

helpers = append(helpers, global.NewSeniorityMerge(
crypto.KeyTypeEd448,
oldProver,
))
peerIds = append(peerIds, peer.ID(e.pubsub.GetPeerID()).String())

if len(e.config.Engine.MultisigProverEnrollmentPaths) != 0 {
e.logger.Debug("loading old configs for merge helpers")
for _, conf := range e.config.Engine.MultisigProverEnrollmentPaths {
extraConf, err := config.LoadConfig(conf, "", false)
if err != nil {
e.logger.Error("could not load config for merge helpers", zap.Error(err))
continue
}

peerPrivKey, err := hex.DecodeString(extraConf.P2P.PeerPrivKey)
if err != nil {
e.logger.Error("could not decode peer key for merge helpers", zap.Error(err))
continue
}

privKey, err := pcrypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
e.logger.Error("could not unmarshal peer key for merge helpers", zap.Error(err))
continue
}

pub := privKey.GetPublic()
pubBytes, err := pub.Raw()
if err != nil {
e.logger.Error("could not get public key for merge helpers", zap.Error(err))
continue
}

id, err := peer.IDFromPublicKey(pub)
if err != nil {
e.logger.Error("could not get peer ID for merge helpers", zap.Error(err))
continue
}

priv, err := privKey.Raw()
if err != nil {
e.logger.Error("could not get private key for merge helpers", zap.Error(err))
continue
}

signer, err := keys.Ed448KeyFromBytes(priv, pubBytes)
if err != nil {
e.logger.Error("could not create signer for merge helpers", zap.Error(err))
continue
}

peerIds = append(peerIds, id.String())
helpers = append(helpers, global.NewSeniorityMerge(
crypto.KeyTypeEd448,
signer,
))
}
}

return helpers, peerIds
}

// submitSeniorityMerge submits a seniority merge request to claim additional
// seniority from old peer keys for an existing prover.
func (e *GlobalConsensusEngine) submitSeniorityMerge(
frame *protobufs.GlobalFrame,
helpers []*global.SeniorityMerge,
) error {
if len(helpers) == 0 {
return errors.New("no merge helpers available")
}

seniorityMerge, err := global.NewProverSeniorityMerge(
frame.Header.FrameNumber,
helpers,
e.hypergraph,
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, e.inclusionProver),
e.keyManager,
)
if err != nil {
e.logger.Error("could not construct seniority merge", zap.Error(err))
return errors.Wrap(err, "submit seniority merge")
}

err = seniorityMerge.Prove(frame.Header.FrameNumber)
if err != nil {
e.logger.Error("could not prove seniority merge", zap.Error(err))
return errors.Wrap(err, "submit seniority merge")
}

bundle := &protobufs.MessageBundle{
Requests: []*protobufs.MessageRequest{
{
Request: &protobufs.MessageRequest_SeniorityMerge{
SeniorityMerge: seniorityMerge.ToProtobuf(),
},
},
},
Timestamp: time.Now().UnixMilli(),
}

msg, err := bundle.ToCanonicalBytes()
if err != nil {
e.logger.Error("could not encode seniority merge bundle", zap.Error(err))
return errors.Wrap(err, "submit seniority merge")
}

err = e.pubsub.PublishToBitmask(
GLOBAL_PROVER_BITMASK,
msg,
)
if err != nil {
e.logger.Error("could not publish seniority merge", zap.Error(err))
return errors.Wrap(err, "submit seniority merge")
}

e.logger.Info("submitted seniority merge request")

return nil
}

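Both call sites above clamp the *big.Int returned by compat.GetAggregatedSeniority before comparing it with the stored uint64 seniority; a value that does not fit is treated as zero rather than truncated. The guard in isolation, as a tiny self-contained sketch:

package senioritysketch

import "math/big"

// clampToUint64 mirrors the guard used above: seniority is persisted as an
// 8-byte big-endian uint64, so an aggregate that does not fit is treated as
// zero instead of being silently truncated.
func clampToUint64(v *big.Int) uint64 {
	if v != nil && v.IsUint64() {
		return v.Uint64()
	}
	return 0
}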
func (e *GlobalConsensusEngine) DecideWorkerJoins(
reject [][]byte,
confirm [][]byte,

@ -297,7 +297,8 @@ func (e *GlobalConsensusEngine) filterProverOnlyRequests(
*protobufs.MessageRequest_Confirm,
*protobufs.MessageRequest_Reject,
*protobufs.MessageRequest_Kick,
*protobufs.MessageRequest_Update:
*protobufs.MessageRequest_Update,
*protobufs.MessageRequest_SeniorityMerge:
// Prover messages are allowed
filtered = append(filtered, req)
default:
@ -367,6 +368,10 @@ func requestTypeNameAndDetail(
return "ProverUpdate",
zap.Any(fmt.Sprintf("request_%d_prover_update", idx), actual.Update),
true
case *protobufs.MessageRequest_SeniorityMerge:
return "ProverSeniorityMerge",
zap.Any(fmt.Sprintf("request_%d_seniority_merge", idx), actual.SeniorityMerge),
true
case *protobufs.MessageRequest_TokenDeploy:
return "TokenDeploy",
zap.Any(fmt.Sprintf("request_%d_token_deploy", idx), actual.TokenDeploy),

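filterProverOnlyRequests and requestTypeNameAndDetail both gain a SeniorityMerge arm: any new prover message type must be added to this kind of allowlist switch or it never reaches the prover pipeline. A generic, self-contained illustration of the pattern (the types below are stand-ins, not the real protobufs wrappers):

package filtersketch

// Stand-ins for the protobufs.MessageRequest_* wrapper types.
type request any

type proverJoin struct{}
type seniorityMerge struct{}
type tokenTransfer struct{}

// filterProverOnly mirrors the switch above: only explicitly allow-listed
// request kinds survive, so every new prover message (like seniorityMerge)
// has to be added here or it is silently dropped.
func filterProverOnly(reqs []request) []request {
	filtered := []request{}
	for _, r := range reqs {
		switch r.(type) {
		case proverJoin, seniorityMerge:
			filtered = append(filtered, r)
		default:
			// non-prover messages are dropped from this path
		}
	}
	return filtered
}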
@ -109,7 +109,7 @@ func main() {
}
defer closer.Close()

db1 := store.NewPebbleDB(logger, nodeConfig1.DB, uint(0))
db1 := store.NewPebbleDB(logger, nodeConfig1, uint(0))
defer db1.Close()

// Determine iteration bounds based on prefix filter
@ -216,7 +216,7 @@ func runCompareMode(
log.Fatal("failed to load config", err)
}

db2 := store.NewPebbleDB(logger, nodeConfig2.DB, uint(0))
db2 := store.NewPebbleDB(logger, nodeConfig2, uint(0))
defer db2.Close()

iter2, err := db2.NewIter(lowerBound, upperBound)

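runCompareMode walks two databases over the same key bounds; conceptually this is a merge-style walk over two sorted iterators. A self-contained illustration follows, with plain sorted slices standing in for the Pebble iterators the tool actually uses via db.NewIter(lowerBound, upperBound):

package comparesketch

// diffSortedKeys reports keys present in only one of two sorted key sets,
// the same shape of comparison a DB compare mode performs while walking two
// iterators in lockstep. Inputs must be sorted ascending.
func diffSortedKeys(a, b []string) (onlyA, onlyB []string) {
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]:
			i++
			j++
		case a[i] < b[j]:
			onlyA = append(onlyA, a[i])
			i++
		default:
			onlyB = append(onlyB, b[j])
			j++
		}
	}
	onlyA = append(onlyA, a[i:]...)
	onlyB = append(onlyB, b[j:]...)
	return onlyA, onlyB
}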
@ -585,6 +585,8 @@ func (e *GlobalExecutionEngine) tryExtractMessageForIntrinsic(
payload, err = r.Reject.ToCanonicalBytes()
case *protobufs.MessageRequest_Kick:
payload, err = r.Kick.ToCanonicalBytes()
case *protobufs.MessageRequest_SeniorityMerge:
payload, err = r.SeniorityMerge.ToCanonicalBytes()
default:
err = errors.New("unsupported message type")
}

@ -611,6 +611,66 @@ func (p *ProverUpdate) ToProtobuf() *protobufs.ProverUpdate {
}
}

// FromProtobuf converts a protobuf ProverSeniorityMerge to intrinsics
func ProverSeniorityMergeFromProtobuf(
pb *protobufs.ProverSeniorityMerge,
hg hypergraph.Hypergraph,
rdfMultiprover *schema.RDFMultiprover,
keyManager keys.KeyManager,
) (*ProverSeniorityMerge, error) {
if pb == nil {
return nil, nil
}

signature, err := BLS48581AddressedSignatureFromProtobuf(
pb.PublicKeySignatureBls48581,
)
if err != nil {
return nil, errors.Wrap(err, "prover seniority merge from protobuf")
}

// Convert MergeTargets
var mergeTargets []*SeniorityMerge
if len(pb.MergeTargets) > 0 {
mergeTargets = make([]*SeniorityMerge, len(pb.MergeTargets))
for i, target := range pb.MergeTargets {
converted, err := SeniorityMergeFromProtobuf(target)
if err != nil {
return nil, errors.Wrapf(err, "converting merge target %d", i)
}
mergeTargets[i] = converted
}
}

return &ProverSeniorityMerge{
FrameNumber: pb.FrameNumber,
PublicKeySignatureBLS48581: *signature,
MergeTargets: mergeTargets,
hypergraph: hg,
rdfMultiprover: rdfMultiprover,
keyManager: keyManager,
}, nil
}

// ToProtobuf converts an intrinsics ProverSeniorityMerge to protobuf
func (p *ProverSeniorityMerge) ToProtobuf() *protobufs.ProverSeniorityMerge {
if p == nil {
return nil
}

// Convert MergeTargets
mergeTargets := make([]*protobufs.SeniorityMerge, len(p.MergeTargets))
for i, target := range p.MergeTargets {
mergeTargets[i] = target.ToProtobuf()
}

return &protobufs.ProverSeniorityMerge{
FrameNumber: p.FrameNumber,
PublicKeySignatureBls48581: p.PublicKeySignatureBLS48581.ToProtobuf(),
MergeTargets: mergeTargets,
}
}

// FromProtobuf converts a protobuf MessageRequest to intrinsics types
func GlobalRequestFromProtobuf(
pb *protobufs.MessageRequest,
@ -695,6 +755,14 @@ func GlobalRequestFromProtobuf(
keyManager,
)

case *protobufs.MessageRequest_SeniorityMerge:
return ProverSeniorityMergeFromProtobuf(
req.SeniorityMerge,
hg,
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, inclusionProver),
keyManager,
)

default:
return nil, errors.New("unknown global request type")
}

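A conversion pair like this is easiest to keep honest with a round-trip check. Below is a hedged, test-style sketch: it assumes nil runtime dependencies are acceptable for pure conversion (the conversion code above only touches wire fields), and the struct literals mirror the ones used in the test file added later in this commit.

package global_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global"
	"source.quilibrium.com/quilibrium/monorepo/types/crypto"
)

// Round-trip sketch: ToProtobuf then FromProtobuf should preserve the wire
// fields. The nil dependencies are an assumption for conversion-only use.
func TestProverSeniorityMergeRoundTrip(t *testing.T) {
	orig, err := global.NewProverSeniorityMerge(
		42,
		[]*global.SeniorityMerge{{
			KeyType:   crypto.KeyTypeEd448,
			PublicKey: []byte("pk"),
			Signature: []byte("sig"),
		}},
		nil, nil, nil,
	)
	require.NoError(t, err)
	orig.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
		Signature: []byte("signature"),
		Address:   make([]byte, 32),
	}

	pb := orig.ToProtobuf()
	back, err := global.ProverSeniorityMergeFromProtobuf(pb, nil, nil, nil)
	require.NoError(t, err)
	require.Equal(t, orig.FrameNumber, back.FrameNumber)
	require.Equal(t, len(orig.MergeTargets), len(back.MergeTargets))
}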
@ -678,6 +678,58 @@ func (a *GlobalIntrinsic) Validate(
).Inc()
return nil

case protobufs.ProverSeniorityMergeType:
// Parse ProverSeniorityMerge directly from input
pb := &protobufs.ProverSeniorityMerge{}
if err := pb.FromCanonicalBytes(input); err != nil {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return errors.Wrap(err, "validate")
}

// Convert from protobuf to intrinsics type
op, err := ProverSeniorityMergeFromProtobuf(
pb,
a.hypergraph,
a.rdfMultiprover,
a.keyManager,
)
if err != nil {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return errors.Wrap(err, "validate")
}

valid, err := op.Verify(frameNumber)
if err != nil {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return errors.Wrap(err, "validate")
}

if !valid {
observability.ValidateErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return errors.Wrap(
errors.New("invalid prover seniority merge"),
"validate",
)
}

observability.ValidateTotal.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil

default:
observability.ValidateErrors.WithLabelValues(
"global",
@ -1156,6 +1208,59 @@ func (a *GlobalIntrinsic) InvokeStep(
).Inc()
return a.state, nil

case protobufs.ProverSeniorityMergeType:
opTimer := prometheus.NewTimer(
observability.OperationDuration.WithLabelValues(
"global",
"prover_seniority_merge",
),
)
defer opTimer.ObserveDuration()

// Parse ProverSeniorityMerge directly from input
pb := &protobufs.ProverSeniorityMerge{}
if err := pb.FromCanonicalBytes(input); err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}

// Convert from protobuf to intrinsics type
op, err := ProverSeniorityMergeFromProtobuf(
pb,
a.hypergraph,
a.rdfMultiprover,
a.keyManager,
)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}

matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
a.state, err = op.Materialize(frameNumber, state)
matTimer.ObserveDuration()
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}

observability.InvokeStepTotal.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return a.state, nil

default:
observability.InvokeStepErrors.WithLabelValues(
"global",
@ -1274,6 +1379,17 @@ func (a *GlobalIntrinsic) Lock(

observability.LockTotal.WithLabelValues("global", "prover_kick").Inc()

case protobufs.ProverSeniorityMergeType:
reads, writes, err = a.tryLockSeniorityMerge(frameNumber, input)
if err != nil {
return nil, err
}

observability.LockTotal.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()

default:
observability.LockErrors.WithLabelValues(
"global",
@ -1737,6 +1853,60 @@ func (a *GlobalIntrinsic) tryLockKick(frameNumber uint64, input []byte) (
return reads, writes, nil
}

func (a *GlobalIntrinsic) tryLockSeniorityMerge(
frameNumber uint64,
input []byte,
) (
[][]byte,
[][]byte,
error,
) {
// Parse ProverSeniorityMerge directly from input
pb := &protobufs.ProverSeniorityMerge{}
if err := pb.FromCanonicalBytes(input); err != nil {
observability.LockErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, nil, errors.Wrap(err, "lock")
}

// Convert from protobuf to intrinsics type
op, err := ProverSeniorityMergeFromProtobuf(
pb,
a.hypergraph,
a.rdfMultiprover,
a.keyManager,
)
if err != nil {
observability.LockErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, nil, errors.Wrap(err, "lock")
}

reads, err := op.GetReadAddresses(frameNumber)
if err != nil {
observability.LockErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, nil, errors.Wrap(err, "lock")
}

writes, err := op.GetWriteAddresses(frameNumber)
if err != nil {
observability.LockErrors.WithLabelValues(
"global",
"prover_seniority_merge",
).Inc()
return nil, nil, errors.Wrap(err, "lock")
}

return reads, writes, nil
}

// LoadGlobalIntrinsic loads the global intrinsic from the global intrinsic
// address. The global intrinsic is implicitly deployed and always exists at the
// global address.

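Every global request type flows through the same three hooks: Lock derives the read/write address sets, Validate runs Verify, and InvokeStep runs Materialize. A compact sketch of the first two stages for the new type follows; the local interface is an illustrative stand-in (the real contract is intrinsics.IntrinsicOperation), not the project's actual wiring.

package lifecyclesketch

import "errors"

// operation is a local stand-in matching the methods shown above.
type operation interface {
	GetReadAddresses(frameNumber uint64) ([][]byte, error)
	GetWriteAddresses(frameNumber uint64) ([][]byte, error)
	Verify(frameNumber uint64) (bool, error)
}

// lockAndValidate sketches the gatekeeping: Lock asks the operation for its
// read/write sets so conflicting requests serialize, then Validate runs
// Verify before InvokeStep is allowed to Materialize the state change.
func lockAndValidate(op operation, frameNumber uint64) error {
	if _, err := op.GetReadAddresses(frameNumber); err != nil {
		return err
	}
	if _, err := op.GetWriteAddresses(frameNumber); err != nil {
		return err
	}
	valid, err := op.Verify(frameNumber)
	if err != nil {
		return err
	}
	if !valid {
		return errors.New("invalid prover seniority merge")
	}
	return nil
}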
@ -0,0 +1,532 @@
package global

import (
"encoding/binary"
"math/big"
"slices"

"github.com/iden3/go-iden3-crypto/poseidon"
pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global/compat"
hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/types/keys"
"source.quilibrium.com/quilibrium/monorepo/types/schema"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)

// ProverSeniorityMerge allows existing provers to claim seniority from their
// old peer keys. This is used as a repair mechanism for provers who joined
// before the seniority merge bug was fixed.
type ProverSeniorityMerge struct {
// The frame number when this request is made
FrameNumber uint64
// The BLS48581 addressed signature
PublicKeySignatureBLS48581 BLS48581AddressedSignature
// Any merge targets for seniority
MergeTargets []*SeniorityMerge

// Runtime dependencies (injected after deserialization)
hypergraph hypergraph.Hypergraph
keyManager keys.KeyManager
rdfMultiprover *schema.RDFMultiprover
}

// NewProverSeniorityMerge creates a new ProverSeniorityMerge instance
func NewProverSeniorityMerge(
frameNumber uint64,
mergeTargets []*SeniorityMerge,
hypergraph hypergraph.Hypergraph,
rdfMultiprover *schema.RDFMultiprover,
keyManager keys.KeyManager,
) (*ProverSeniorityMerge, error) {
return &ProverSeniorityMerge{
FrameNumber: frameNumber,
MergeTargets: mergeTargets, // buildutils:allow-slice-alias slice is static
hypergraph: hypergraph,
rdfMultiprover: rdfMultiprover,
keyManager: keyManager,
}, nil
}

// GetCost implements intrinsics.IntrinsicOperation.
func (p *ProverSeniorityMerge) GetCost() (*big.Int, error) {
return big.NewInt(0), nil
}

// Materialize implements intrinsics.IntrinsicOperation.
func (p *ProverSeniorityMerge) Materialize(
frameNumber uint64,
s state.State,
) (state.State, error) {
if p.hypergraph == nil || p.rdfMultiprover == nil {
return nil, errors.Wrap(errors.New("missing deps"), "materialize")
}
if len(p.MergeTargets) == 0 {
return nil, errors.Wrap(errors.New("no merge targets"), "materialize")
}

hg := s.(*hgstate.HypergraphState)

// The prover address is the addressed signature's Address (poseidon(pubkey))
proverAddress := p.PublicKeySignatureBLS48581.Address
if len(proverAddress) != 32 {
return nil, errors.Wrap(
errors.New("invalid prover address length"),
"materialize",
)
}

// Ensure the prover exists
proverFullAddr := [64]byte{}
copy(proverFullAddr[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
copy(proverFullAddr[32:], proverAddress)

proverVertex, err := hg.Get(
proverFullAddr[:32],
proverFullAddr[32:],
hgstate.VertexAddsDiscriminator,
)
if err != nil || proverVertex == nil {
return nil, errors.Wrap(errors.New("prover not found"), "materialize")
}

proverTree, ok := proverVertex.(*tries.VectorCommitmentTree)
if !ok || proverTree == nil {
return nil, errors.Wrap(errors.New("invalid prover vertex"), "materialize")
}

// Get existing seniority
existingSeniorityData, err := p.rdfMultiprover.Get(
GLOBAL_RDF_SCHEMA,
"prover:Prover",
"Seniority",
proverTree,
)
var existingSeniority uint64 = 0
if err == nil && len(existingSeniorityData) == 8 {
existingSeniority = binary.BigEndian.Uint64(existingSeniorityData)
}

// Convert Ed448 public keys to peer IDs and calculate seniority
var peerIds []string
for _, target := range p.MergeTargets {
if target.KeyType == crypto.KeyTypeEd448 {
pk, err := pcrypto.UnmarshalEd448PublicKey(target.PublicKey)
if err != nil {
return nil, errors.Wrap(err, "materialize")
}

peerId, err := peer.IDFromPublicKey(pk)
if err != nil {
return nil, errors.Wrap(err, "materialize")
}

peerIds = append(peerIds, peerId.String())
}
}

// Get aggregated seniority from merge targets
var mergeSeniority uint64 = 0
if len(peerIds) > 0 {
seniorityBig := compat.GetAggregatedSeniority(peerIds)
if seniorityBig.IsUint64() {
mergeSeniority = seniorityBig.Uint64()
}
}

// Add merge seniority to existing seniority
newSeniority := existingSeniority + mergeSeniority

// Store updated seniority
seniorityBytes := make([]byte, 8)
binary.BigEndian.PutUint64(seniorityBytes, newSeniority)
err = p.rdfMultiprover.Set(
GLOBAL_RDF_SCHEMA,
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
"prover:Prover",
"Seniority",
seniorityBytes,
proverTree,
)
if err != nil {
return nil, errors.Wrap(err, "materialize")
}

// Get the prior tree for change tracking
priorVertex, err := hg.Get(
proverFullAddr[:32],
proverFullAddr[32:],
hgstate.VertexAddsDiscriminator,
)
var priorTree *tries.VectorCommitmentTree
if err == nil && priorVertex != nil {
priorTree, _ = priorVertex.(*tries.VectorCommitmentTree)
}

// Update prover vertex with new seniority
proverVertexUpdate := hg.NewVertexAddMaterializedState(
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
[32]byte(proverAddress),
frameNumber,
priorTree,
proverTree,
)

err = hg.Set(
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
proverAddress,
hgstate.VertexAddsDiscriminator,
frameNumber,
proverVertexUpdate,
)
if err != nil {
return nil, errors.Wrap(err, "materialize")
}

// Mark merge targets as spent
for _, mt := range p.MergeTargets {
spentMergeBI, err := poseidon.HashBytes(slices.Concat(
[]byte("PROVER_SENIORITY_MERGE"),
mt.PublicKey,
))
if err != nil {
return nil, errors.Wrap(err, "materialize")
}

spentMergeVertex := hg.NewVertexAddMaterializedState(
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
[32]byte(spentMergeBI.FillBytes(make([]byte, 32))),
frameNumber,
nil,
&tries.VectorCommitmentTree{},
)

err = hg.Set(
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
spentMergeBI.FillBytes(make([]byte, 32)),
hgstate.VertexAddsDiscriminator,
frameNumber,
spentMergeVertex,
)
if err != nil {
return nil, errors.Wrap(err, "materialize")
}
}

return s, nil
}

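Two details of Materialize worth calling out: the new seniority is stored as existing plus merged, encoded as an 8-byte big-endian value, and each merge target gets a "spent" vertex at an address derived from poseidon("PROVER_SENIORITY_MERGE" || publicKey) so the same old key cannot be claimed twice; Verify and GetWriteAddresses later in this file derive the same marker address. A self-contained sketch of those two derivations, using the same libraries as the imports above:

package mergesketch

import (
	"encoding/binary"
	"slices"

	"github.com/iden3/go-iden3-crypto/poseidon"
)

// encodeSeniority mirrors how the intrinsic stores seniority: existing plus
// merged, as an 8-byte big-endian value.
func encodeSeniority(existing, merged uint64) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, existing+merged)
	return out
}

// spentMarkerAddress derives the per-target anti-replay address exactly as
// Materialize and Verify do: poseidon("PROVER_SENIORITY_MERGE" || publicKey),
// padded to 32 bytes.
func spentMarkerAddress(publicKey []byte) ([]byte, error) {
	h, err := poseidon.HashBytes(slices.Concat(
		[]byte("PROVER_SENIORITY_MERGE"),
		publicKey,
	))
	if err != nil {
		return nil, err
	}
	return h.FillBytes(make([]byte, 32)), nil
}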
// Prove implements intrinsics.IntrinsicOperation.
|
||||
func (p *ProverSeniorityMerge) Prove(frameNumber uint64) error {
|
||||
if p.keyManager == nil {
|
||||
return errors.New("key manager not initialized")
|
||||
}
|
||||
|
||||
// Get the signing key
|
||||
signingKey, err := p.keyManager.GetSigningKey("q-prover-key")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
// Sign merge target signatures
|
||||
for _, mt := range p.MergeTargets {
|
||||
if mt.signer != nil {
|
||||
mt.Signature, err = mt.signer.SignWithDomain(
|
||||
signingKey.Public().([]byte),
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get the public key
|
||||
pubKey := signingKey.Public()
|
||||
|
||||
// Compute address from public key
|
||||
addressBI, err := poseidon.HashBytes(pubKey.([]byte))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
// Create domain for seniority merge signature
|
||||
mergeDomainPreimage := slices.Concat(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
)
|
||||
mergeDomain, err := poseidon.HashBytes(mergeDomainPreimage)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
// Create message to sign: frame number + all merge target public keys
|
||||
message := binary.BigEndian.AppendUint64(nil, p.FrameNumber)
|
||||
for _, mt := range p.MergeTargets {
|
||||
message = append(message, mt.PublicKey...)
|
||||
}
|
||||
|
||||
// Sign the message
|
||||
signature, err := signingKey.SignWithDomain(
|
||||
message,
|
||||
mergeDomain.Bytes(),
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
// Create the addressed signature
|
||||
p.PublicKeySignatureBLS48581 = BLS48581AddressedSignature{
|
||||
Signature: signature,
|
||||
Address: address,
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProverSeniorityMerge) GetReadAddresses(frameNumber uint64) ([][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (p *ProverSeniorityMerge) GetWriteAddresses(frameNumber uint64) ([][]byte, error) {
|
||||
proverAddress := p.PublicKeySignatureBLS48581.Address
|
||||
proverFullAddress := [64]byte{}
|
||||
copy(proverFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(proverFullAddress[32:], proverAddress)
|
||||
|
||||
addresses := map[string]struct{}{}
|
||||
addresses[string(proverFullAddress[:])] = struct{}{}
|
||||
|
||||
// Add spent merge addresses
|
||||
for _, mt := range p.MergeTargets {
|
||||
spentMergeBI, err := poseidon.HashBytes(slices.Concat(
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
mt.PublicKey,
|
||||
))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
addresses[string(slices.Concat(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
spentMergeBI.FillBytes(make([]byte, 32)),
|
||||
))] = struct{}{}
|
||||
}
|
||||
|
||||
result := [][]byte{}
|
||||
for key := range addresses {
|
||||
result = append(result, []byte(key))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Verify implements intrinsics.IntrinsicOperation.
|
||||
func (p *ProverSeniorityMerge) Verify(frameNumber uint64) (bool, error) {
|
||||
if p.hypergraph == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("hypergraph not initialized"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if p.keyManager == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("key manager not initialized"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if p.rdfMultiprover == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("rdf multiprover not initialized"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if len(p.MergeTargets) == 0 {
|
||||
return false, errors.Wrap(errors.New("no merge targets"), "verify")
|
||||
}
|
||||
if len(p.PublicKeySignatureBLS48581.Address) != 32 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("invalid addressed prover address"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Disallow too old of a request
|
||||
if p.FrameNumber+10 < frameNumber {
|
||||
return false, errors.Wrap(errors.New("outdated request"), "verify")
|
||||
}
|
||||
|
||||
// Resolve the prover vertex
|
||||
proverFullAddr := [64]byte{}
|
||||
copy(proverFullAddr[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(proverFullAddr[32:], p.PublicKeySignatureBLS48581.Address)
|
||||
|
||||
vertexData, err := p.hypergraph.GetVertexData(proverFullAddr)
|
||||
if err != nil || vertexData == nil {
|
||||
return false, errors.Wrap(errors.New("prover not found"), "verify")
|
||||
}
|
||||
|
||||
// Fetch the registered PublicKey
|
||||
pubKeyBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"prover:Prover",
|
||||
"PublicKey",
|
||||
vertexData,
|
||||
)
|
||||
if err != nil || len(pubKeyBytes) == 0 {
|
||||
return false, errors.Wrap(errors.New("prover public key missing"), "verify")
|
||||
}
|
||||
|
||||
// Check poseidon(pubKey) == addressed.Address
|
||||
addrBI, err := poseidon.HashBytes(pubKeyBytes)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
addrCheck := addrBI.FillBytes(make([]byte, 32))
|
||||
if !slices.Equal(addrCheck, p.PublicKeySignatureBLS48581.Address) {
|
||||
return false, errors.Wrap(
|
||||
errors.New("address does not match registered pubkey"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Verify merge target signatures and track peer IDs for seniority lookup
|
||||
var peerIds []string
|
||||
for _, mt := range p.MergeTargets {
|
||||
valid, err := p.keyManager.ValidateSignature(
|
||||
mt.KeyType,
|
||||
mt.PublicKey,
|
||||
pubKeyBytes,
|
||||
mt.Signature,
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
)
|
||||
if err != nil || !valid {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
// Confirm this merge target has not already been used
|
||||
spentMergeBI, err := poseidon.HashBytes(slices.Concat(
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
mt.PublicKey,
|
||||
))
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
spentAddress := [64]byte{}
|
||||
copy(spentAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(spentAddress[32:], spentMergeBI.FillBytes(make([]byte, 32)))
|
||||
|
||||
v, err := p.hypergraph.GetVertex(spentAddress)
|
||||
if err == nil && v != nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("merge target already used"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Also check against the ProverJoin spent marker
|
||||
joinSpentMergeBI, err := poseidon.HashBytes(slices.Concat(
|
||||
[]byte("PROVER_JOIN_MERGE"),
|
||||
mt.PublicKey,
|
||||
))
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
joinSpentAddress := [64]byte{}
|
||||
copy(joinSpentAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(joinSpentAddress[32:], joinSpentMergeBI.FillBytes(make([]byte, 32)))
|
||||
|
||||
v, err = p.hypergraph.GetVertex(joinSpentAddress)
|
||||
if err == nil && v != nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("merge target already used in join"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Track peer ID for seniority lookup
|
||||
if mt.KeyType == crypto.KeyTypeEd448 {
|
||||
pk, err := pcrypto.UnmarshalEd448PublicKey(mt.PublicKey)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
peerId, err := peer.IDFromPublicKey(pk)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
peerIds = append(peerIds, peerId.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Get existing seniority
|
||||
existingSeniorityData, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"prover:Prover",
|
||||
"Seniority",
|
||||
vertexData,
|
||||
)
|
||||
var existingSeniority uint64 = 0
|
||||
if err == nil && len(existingSeniorityData) == 8 {
|
||||
existingSeniority = binary.BigEndian.Uint64(existingSeniorityData)
|
||||
}
|
||||
|
||||
// Calculate seniority from merge targets
|
||||
var mergeSeniority uint64 = 0
|
||||
if len(peerIds) > 0 {
|
||||
seniorityBig := compat.GetAggregatedSeniority(peerIds)
|
||||
if seniorityBig.IsUint64() {
|
||||
mergeSeniority = seniorityBig.Uint64()
|
||||
}
|
||||
}
|
||||
|
||||
// Merge is only allowed if the resulting seniority would be higher
|
||||
if mergeSeniority <= existingSeniority {
|
||||
return false, errors.Wrap(
|
||||
errors.New("merge would not increase seniority"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Domain for seniority merge
|
||||
mergeDomainPreimage := slices.Concat(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
)
|
||||
mergeDomain, err := poseidon.HashBytes(mergeDomainPreimage)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
// Recreate the message that was signed
|
||||
message := binary.BigEndian.AppendUint64(nil, p.FrameNumber)
|
||||
for _, mt := range p.MergeTargets {
|
||||
message = append(message, mt.PublicKey...)
|
||||
}
|
||||
|
||||
// Validate signature
|
||||
ok, err := p.keyManager.ValidateSignature(
|
||||
crypto.KeyTypeBLS48581G1,
|
||||
pubKeyBytes,
|
||||
message,
|
||||
p.PublicKeySignatureBLS48581.Signature,
|
||||
mergeDomain.Bytes(),
|
||||
)
|
||||
if err != nil || !ok {
|
||||
return false, errors.Wrap(errors.New("invalid seniority merge signature"), "verify")
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var _ intrinsics.IntrinsicOperation = (*ProverSeniorityMerge)(nil)
|
||||
@ -0,0 +1,840 @@
|
||||
package global_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/iden3/go-iden3-crypto/poseidon"
|
||||
pcrypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"source.quilibrium.com/quilibrium/monorepo/hypergraph"
|
||||
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global"
|
||||
hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/mocks"
|
||||
qcrypto "source.quilibrium.com/quilibrium/monorepo/types/tries"
|
||||
)
|
||||
|
||||
func TestProverSeniorityMerge_Verify(t *testing.T) {
|
||||
t.Run("verify passes signature validation but fails seniority check with test data", func(t *testing.T) {
|
||||
// This test verifies that all signature validations pass correctly.
|
||||
// The final seniority check will fail because test Ed448 keys have 0 seniority
|
||||
// in the compat.GetAggregatedSeniority() lookup. This is expected behavior -
|
||||
// in production, only merge targets with actual seniority would be used.
|
||||
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
pubKey := make([]byte, 585) // Simulate a BLS48581G1 public key
|
||||
for i := range pubKey {
|
||||
pubKey[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute prover address from public key
|
||||
addressBI, err := poseidon.HashBytes(pubKey)
|
||||
require.NoError(t, err)
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
// Create full address
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Create a tree showing prover exists with a public key (no existing seniority)
|
||||
tree := &qcrypto.VectorCommitmentTree{}
|
||||
tree.Insert([]byte{0}, pubKey, nil, big.NewInt(int64(len(pubKey))))
|
||||
|
||||
// Configure mock hypergraph - prover exists
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(tree, nil)
|
||||
mockHypergraph.On("GetVertex", mock.Anything).Return(nil, assert.AnError) // no spent merge
|
||||
|
||||
// Configure mock key manager for Ed448 merge target signature (this WILL be called)
|
||||
mockKeyManager.On("ValidateSignature",
|
||||
crypto.KeyTypeEd448,
|
||||
rawEd448PubKey,
|
||||
pubKey,
|
||||
[]byte("ed448_signature"),
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
).Return(true, nil)
|
||||
|
||||
// Note: BLS signature validation will NOT be called because seniority check
|
||||
// happens before final signature validation
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the addressed signature
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify - should fail due to seniority check (test peer IDs have 0 seniority)
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "merge would not increase seniority")
|
||||
assert.False(t, valid)
|
||||
|
||||
// Verify that Ed448 signature validation was called (proving validation passed)
|
||||
mockKeyManager.AssertExpectations(t)
|
||||
mockHypergraph.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("verify fails if prover does not exist", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
address := make([]byte, 32)
|
||||
for i := range address {
|
||||
address[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create full address
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Configure mock hypergraph - prover does not exist
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(nil, assert.AnError)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "prover not found")
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
t.Run("verify fails if no merge targets", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
address := make([]byte, 32)
|
||||
for i := range address {
|
||||
address[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create the operation with no merge targets
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{}, // empty
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no merge targets")
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
t.Run("verify fails if merge target already used via PROVER_SENIORITY_MERGE", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
pubKey := make([]byte, 585)
|
||||
for i := range pubKey {
|
||||
pubKey[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute prover address
|
||||
addressBI, err := poseidon.HashBytes(pubKey)
|
||||
require.NoError(t, err)
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Create a tree showing prover exists
|
||||
tree := &qcrypto.VectorCommitmentTree{}
|
||||
tree.Insert([]byte{0}, pubKey, nil, big.NewInt(int64(len(pubKey))))
|
||||
|
||||
// Compute spent merge address
|
||||
spentMergeBI, err := poseidon.HashBytes(slices.Concat(
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
rawEd448PubKey,
|
||||
))
|
||||
require.NoError(t, err)
|
||||
spentAddress := [64]byte{}
|
||||
copy(spentAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(spentAddress[32:], spentMergeBI.FillBytes(make([]byte, 32)))
|
||||
|
||||
// Configure mock - prover exists, merge target already used
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(tree, nil)
|
||||
// Return a proper vertex to indicate the merge target was already used
|
||||
mockHypergraph.On("GetVertex", spentAddress).Return(
|
||||
hypergraph.NewVertex(
|
||||
[32]byte(spentAddress[:32]),
|
||||
[32]byte(spentAddress[32:]),
|
||||
make([]byte, 74),
|
||||
big.NewInt(0),
|
||||
),
|
||||
nil,
|
||||
)
|
||||
|
||||
// Configure mock key manager for Ed448 signature verification
|
||||
mockKeyManager.On("ValidateSignature",
|
||||
crypto.KeyTypeEd448,
|
||||
rawEd448PubKey,
|
||||
pubKey,
|
||||
[]byte("ed448_signature"),
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
).Return(true, nil)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "merge target already used")
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
t.Run("verify fails if merge target already used via PROVER_JOIN_MERGE", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
pubKey := make([]byte, 585)
|
||||
for i := range pubKey {
|
||||
pubKey[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute prover address
|
||||
addressBI, err := poseidon.HashBytes(pubKey)
|
||||
require.NoError(t, err)
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Create a tree showing prover exists
|
||||
tree := &qcrypto.VectorCommitmentTree{}
|
||||
tree.Insert([]byte{0}, pubKey, nil, big.NewInt(int64(len(pubKey))))
|
||||
|
||||
// Compute spent merge address for PROVER_SENIORITY_MERGE (not found)
|
||||
spentSeniorityMergeBI, err := poseidon.HashBytes(slices.Concat(
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
rawEd448PubKey,
|
||||
))
|
||||
require.NoError(t, err)
|
||||
spentSeniorityAddress := [64]byte{}
|
||||
copy(spentSeniorityAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(spentSeniorityAddress[32:], spentSeniorityMergeBI.FillBytes(make([]byte, 32)))
|
||||
|
||||
// Compute spent merge address for PROVER_JOIN_MERGE (found - already used in join)
|
||||
spentJoinMergeBI, err := poseidon.HashBytes(slices.Concat(
|
||||
[]byte("PROVER_JOIN_MERGE"),
|
||||
rawEd448PubKey,
|
||||
))
|
||||
require.NoError(t, err)
|
||||
spentJoinAddress := [64]byte{}
|
||||
copy(spentJoinAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(spentJoinAddress[32:], spentJoinMergeBI.FillBytes(make([]byte, 32)))
|
||||
|
||||
// Configure mock - prover exists, PROVER_SENIORITY_MERGE not used, PROVER_JOIN_MERGE used
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(tree, nil)
|
||||
mockHypergraph.On("GetVertex", spentSeniorityAddress).Return(nil, assert.AnError) // not used
|
||||
// Return a proper vertex to indicate it was already used in join
|
||||
mockHypergraph.On("GetVertex", spentJoinAddress).Return(
|
||||
hypergraph.NewVertex(
|
||||
[32]byte(spentJoinAddress[:32]),
|
||||
[32]byte(spentJoinAddress[32:]),
|
||||
make([]byte, 74),
|
||||
big.NewInt(0),
|
||||
),
|
||||
nil,
|
||||
)
|
||||
|
||||
// Configure mock key manager for Ed448 signature verification
|
||||
mockKeyManager.On("ValidateSignature",
|
||||
crypto.KeyTypeEd448,
|
||||
rawEd448PubKey,
|
||||
pubKey,
|
||||
[]byte("ed448_signature"),
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
).Return(true, nil)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "merge target already used in join")
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
t.Run("verify fails if merge target signature is invalid", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
pubKey := make([]byte, 585)
|
||||
for i := range pubKey {
|
||||
pubKey[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute prover address
|
||||
addressBI, err := poseidon.HashBytes(pubKey)
|
||||
require.NoError(t, err)
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Create a tree showing prover exists
|
||||
tree := &qcrypto.VectorCommitmentTree{}
|
||||
tree.Insert([]byte{0}, pubKey, nil, big.NewInt(int64(len(pubKey))))
|
||||
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(tree, nil)
|
||||
|
||||
// Configure mock key manager for invalid Ed448 signature
|
||||
mockKeyManager.On("ValidateSignature",
|
||||
crypto.KeyTypeEd448,
|
||||
rawEd448PubKey,
|
||||
pubKey,
|
||||
[]byte("bad_signature"),
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
).Return(false, nil)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("bad_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail - invalid signature returns false without error
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
// Note: When ValidateSignature returns (false, nil), errors.Wrap(nil, "verify") returns nil
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
t.Run("verify fails if merge would not increase seniority", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
pubKey := make([]byte, 585)
|
||||
for i := range pubKey {
|
||||
pubKey[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target (with no seniority override, so 0 seniority)
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute prover address
|
||||
addressBI, err := poseidon.HashBytes(pubKey)
|
||||
require.NoError(t, err)
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Create a tree showing prover exists with existing seniority > 0
|
||||
tree := &qcrypto.VectorCommitmentTree{}
|
||||
tree.Insert([]byte{0}, pubKey, nil, big.NewInt(int64(len(pubKey))))
|
||||
// Set existing seniority to a high value (order 3 in RDF schema)
|
||||
existingSeniority := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(existingSeniority, 1000000) // 1 million seniority
|
||||
tree.Insert([]byte{3 << 2}, existingSeniority, nil, big.NewInt(8))
|
||||
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(tree, nil)
|
||||
mockHypergraph.On("GetVertex", mock.Anything).Return(nil, assert.AnError) // no spent merge
|
||||
|
||||
// Configure mock key manager for Ed448 merge target signature validation
|
||||
mockKeyManager.On("ValidateSignature",
|
||||
crypto.KeyTypeEd448,
|
||||
rawEd448PubKey,
|
||||
pubKey,
|
||||
[]byte("ed448_signature"),
|
||||
[]byte("PROVER_SENIORITY_MERGE"),
|
||||
).Return(true, nil)
|
||||
|
||||
// Create the operation with a merge target that has 0 seniority
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail because merge seniority (0) <= existing seniority (1000000)
|
||||
valid, err := seniorityMerge.Verify(frameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "merge would not increase seniority")
|
||||
assert.False(t, valid)
|
||||
})
|
||||
|
||||
t.Run("verify fails if request is outdated", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data - request from frame 100, but current frame is 200
|
||||
requestFrameNumber := uint64(100)
|
||||
currentFrameNumber := uint64(200)
|
||||
address := make([]byte, 32)
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
requestFrameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Verify should fail due to outdated request
|
||||
valid, err := seniorityMerge.Verify(currentFrameNumber)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "outdated request")
|
||||
assert.False(t, valid)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProverSeniorityMerge_Materialize(t *testing.T) {
|
||||
t.Run("Materialize fails if prover does not exist", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
|
||||
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(252900)
|
||||
address := make([]byte, 32)
|
||||
for i := range address {
|
||||
address[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
fullAddress := [64]byte{}
|
||||
copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(fullAddress[32:], address)
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Mock - return nil for the prover vertex (prover doesn't exist)
|
||||
mockHypergraph.On("GetVertexData", fullAddress).Return(nil, assert.AnError)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Call Materialize - should fail
|
||||
newState, err := seniorityMerge.Materialize(frameNumber, hypergraphState)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "prover not found")
|
||||
assert.Nil(t, newState)
|
||||
})
|
||||
|
||||
t.Run("Materialize fails if no merge targets", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
|
||||
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(252900)
|
||||
address := make([]byte, 32)
|
||||
|
||||
// Create the operation with no merge targets
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{}, // empty
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Call Materialize - should fail
|
||||
newState, err := seniorityMerge.Materialize(frameNumber, hypergraphState)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no merge targets")
|
||||
assert.Nil(t, newState)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestProverSeniorityMerge_GetCost(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// GetCost should return 0
|
||||
cost, err := seniorityMerge.GetCost()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), cost.Int64())
|
||||
}
|
||||
|
||||
func TestProverSeniorityMerge_GetWriteAddresses(t *testing.T) {
|
||||
t.Run("GetWriteAddresses returns prover and spent merge addresses", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
address := make([]byte, 32)
|
||||
for i := range address {
|
||||
address[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create Ed448 key for merge target
|
||||
_, ed448PubKey, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey, err := ed448PubKey.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey,
|
||||
Signature: []byte("ed448_signature"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Get write addresses
|
||||
addresses, err := seniorityMerge.GetWriteAddresses(frameNumber)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have at least 2 addresses: prover address + 1 spent merge address
|
||||
assert.GreaterOrEqual(t, len(addresses), 2)
|
||||
|
||||
// Verify prover address is included
|
||||
proverFullAddress := slices.Concat(intrinsics.GLOBAL_INTRINSIC_ADDRESS[:], address)
|
||||
found := false
|
||||
for _, addr := range addresses {
|
||||
if bytes.Equal(addr, proverFullAddress) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "prover address should be in write addresses")
|
||||
})
|
||||
|
||||
t.Run("GetWriteAddresses with multiple merge targets", func(t *testing.T) {
|
||||
// Setup
|
||||
mockKeyManager := new(mocks.MockKeyManager)
|
||||
mockHypergraph := new(mocks.MockHypergraph)
|
||||
|
||||
// Test data
|
||||
frameNumber := uint64(12345)
|
||||
address := make([]byte, 32)
|
||||
for i := range address {
|
||||
address[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create two Ed448 keys for merge targets
|
||||
_, ed448PubKey1, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey1, err := ed448PubKey1.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ed448PubKey2, err := pcrypto.GenerateEd448Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
rawEd448PubKey2, err := ed448PubKey2.Raw()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the operation with two merge targets
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
seniorityMerge, err := global.NewProverSeniorityMerge(
|
||||
frameNumber,
|
||||
[]*global.SeniorityMerge{
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey1,
|
||||
Signature: []byte("ed448_signature_1"),
|
||||
},
|
||||
{
|
||||
KeyType: crypto.KeyTypeEd448,
|
||||
PublicKey: rawEd448PubKey2,
|
||||
Signature: []byte("ed448_signature_2"),
|
||||
},
|
||||
},
|
||||
mockHypergraph,
|
||||
rdfMultiprover,
|
||||
mockKeyManager,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
seniorityMerge.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
|
||||
Signature: []byte("signature"),
|
||||
Address: address,
|
||||
}
|
||||
|
||||
// Get write addresses
|
||||
addresses, err := seniorityMerge.GetWriteAddresses(frameNumber)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have 3 addresses: prover address + 2 spent merge addresses
|
||||
assert.Equal(t, 3, len(addresses))
|
||||
})
|
||||
}
|
||||
|
||||
@ -453,3 +453,41 @@ func (p *ProverUpdate) FromBytes(data []byte) error {

return nil
}

// ToBytes serializes a ProverSeniorityMerge to bytes using protobuf
func (p *ProverSeniorityMerge) ToBytes() ([]byte, error) {
pb := p.ToProtobuf()
return pb.ToCanonicalBytes()
}

// ToRequestBytes serializes a ProverSeniorityMerge to MessageRequest bytes
// using protobuf
func (p *ProverSeniorityMerge) ToRequestBytes() ([]byte, error) {
pb := p.ToProtobuf()
req := &protobufs.MessageRequest{
Request: &protobufs.MessageRequest_SeniorityMerge{
SeniorityMerge: pb,
},
}
return req.ToCanonicalBytes()
}

// FromBytes deserializes a ProverSeniorityMerge from bytes using protobuf
func (p *ProverSeniorityMerge) FromBytes(data []byte) error {
pb := &protobufs.ProverSeniorityMerge{}
if err := pb.FromCanonicalBytes(data); err != nil {
return errors.Wrap(err, "from bytes")
}

converted, err := ProverSeniorityMergeFromProtobuf(pb, nil, nil, nil)
if err != nil {
return errors.Wrap(err, "from bytes")
}

// Copy only the data fields, runtime dependencies will be set separately
p.FrameNumber = converted.FrameNumber
p.PublicKeySignatureBLS48581 = converted.PublicKeySignatureBLS48581
p.MergeTargets = converted.MergeTargets

return nil
}
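// Illustrative round-trip sketch (not part of this change): an operation can
// be serialized with ToBytes and restored with FromBytes. Per the comment in
// FromBytes above, only the data fields are copied on decode, so the
// hypergraph, RDF multiprover, and key manager references still have to be
// supplied by the caller afterwards. The variable op below is a placeholder
// for an already constructed *ProverSeniorityMerge.
//
//	data, err := op.ToBytes()
//	if err != nil {
//		return err
//	}
//	restored := &ProverSeniorityMerge{}
//	if err := restored.FromBytes(data); err != nil {
//		return err
//	}
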
@ -383,7 +383,7 @@ func main() {
}

if *dangerClearPending {
db := store.NewPebbleDB(logger, nodeConfig.DB, 0)
db := store.NewPebbleDB(logger, nodeConfig, 0)
defer db.Close()
consensusStore := store.NewPebbleConsensusStore(db, logger)
state, err := consensusStore.GetConsensusState(nil)
@ -443,7 +443,7 @@ func main() {
}

if *compactDB {
db := store.NewPebbleDB(logger, nodeConfig.DB, uint(*core))
db := store.NewPebbleDB(logger, nodeConfig, uint(*core))
if err := db.CompactAll(); err != nil {
logger.Fatal("failed to compact database", zap.Error(err))
}
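// Illustrative sketch (not part of this diff): NewPebbleDB now takes the full
// *config.Config and reads the DB settings from its DB field, so callers that
// only hold a *config.DBConfig wrap it first, as the updated tests further
// below do. The path value here is a placeholder.
//
//	dbCfg := &config.DBConfig{Path: ".config/store"}
//	db := store.NewPebbleDB(logger, &config.Config{DB: dbCfg}, 0)
//	defer db.Close()
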
@ -125,9 +125,9 @@ func TestHypergraphSyncServer(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
clientKvdb := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestclient/store"}, 0)
|
||||
serverKvdb := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}, 0)
|
||||
controlKvdb := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestcontrol/store"}, 0)
|
||||
clientKvdb := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestclient/store"}}, 0)
|
||||
serverKvdb := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}}, 0)
|
||||
controlKvdb := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestcontrol/store"}}, 0)
|
||||
|
||||
clientHypergraphStore := store.NewPebbleHypergraphStore(
|
||||
&config.DBConfig{Path: ".configtestclient/store"},
|
||||
@ -477,9 +477,9 @@ func TestHypergraphPartialSync(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
clientKvdb := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestclient/store"}, 0)
|
||||
serverKvdb := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}, 0)
|
||||
controlKvdb := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestcontrol/store"}, 0)
|
||||
clientKvdb := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestclient/store"}}, 0)
|
||||
serverKvdb := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}}, 0)
|
||||
controlKvdb := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestcontrol/store"}}, 0)
|
||||
|
||||
clientHypergraphStore := store.NewPebbleHypergraphStore(
|
||||
&config.DBConfig{Path: ".configtestclient/store"},
|
||||
@ -717,7 +717,7 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) {
|
||||
logDuration("generated data trees", start)
|
||||
|
||||
setupStart := time.Now()
|
||||
serverDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}, 0)
|
||||
serverDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}}, 0)
|
||||
defer serverDB.Close()
|
||||
|
||||
serverStore := store.NewPebbleHypergraphStore(
|
||||
@ -747,7 +747,7 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) {
|
||||
|
||||
clientSetupStart := time.Now()
|
||||
for i := 0; i < clientCount; i++ {
|
||||
clientDBs[i] = store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestclient%d/store", i)}, 0)
|
||||
clientDBs[i] = store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestclient%d/store", i)}}, 0)
|
||||
clientStores[i] = store.NewPebbleHypergraphStore(
|
||||
&config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestclient%d/store", i)},
|
||||
clientDBs[i],
|
||||
@ -1166,7 +1166,7 @@ func TestHypergraphSyncWithExpectedRoot(t *testing.T) {
|
||||
dataTrees[i] = buildDataTree(t, inclusionProver)
|
||||
}
|
||||
|
||||
serverDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}, 0)
|
||||
serverDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}}, 0)
|
||||
defer serverDB.Close()
|
||||
|
||||
serverStore := store.NewPebbleHypergraphStore(
|
||||
@ -1274,7 +1274,7 @@ func TestHypergraphSyncWithExpectedRoot(t *testing.T) {
|
||||
clientCounter := 0
|
||||
createClient := func(name string) (*store.PebbleDB, *hgcrdt.HypergraphCRDT) {
|
||||
clientCounter++
|
||||
clientDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestclient%d/store", clientCounter)}, 0)
|
||||
clientDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestclient%d/store", clientCounter)}}, 0)
|
||||
clientStore := store.NewPebbleHypergraphStore(
|
||||
&config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestclient%d/store", clientCounter)},
|
||||
clientDB,
|
||||
@ -1422,10 +1422,10 @@ func TestHypergraphSyncWithModifiedEntries(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create server and client databases
|
||||
serverDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}, 0)
|
||||
serverDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestserver/store"}}, 0)
|
||||
defer serverDB.Close()
|
||||
|
||||
clientDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestclient/store"}, 0)
|
||||
clientDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestclient/store"}}, 0)
|
||||
defer clientDB.Close()
|
||||
|
||||
serverStore := store.NewPebbleHypergraphStore(
|
||||
@ -1649,10 +1649,10 @@ func TestHypergraphBidirectionalSyncWithDisjointData(t *testing.T) {
|
||||
t.Log("Generated data trees")
|
||||
|
||||
// Create databases and stores for both nodes
|
||||
nodeADB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeA/store"}, 0)
|
||||
nodeADB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeA/store"}}, 0)
|
||||
defer nodeADB.Close()
|
||||
|
||||
nodeBDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeB/store"}, 0)
|
||||
nodeBDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeB/store"}}, 0)
|
||||
defer nodeBDB.Close()
|
||||
|
||||
nodeAStore := store.NewPebbleHypergraphStore(
|
||||
@ -1932,10 +1932,10 @@ func TestHypergraphBidirectionalSyncClientDriven(t *testing.T) {
|
||||
t.Log("Generated data trees")
|
||||
|
||||
// Create databases and stores for both nodes
|
||||
nodeADB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeA_cd/store"}, 0)
|
||||
nodeADB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeA_cd/store"}}, 0)
|
||||
defer nodeADB.Close()
|
||||
|
||||
nodeBDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeB_cd/store"}, 0)
|
||||
nodeBDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtestnodeB_cd/store"}}, 0)
|
||||
defer nodeBDB.Close()
|
||||
|
||||
nodeAStore := store.NewPebbleHypergraphStore(
|
||||
@ -2261,10 +2261,10 @@ func TestHypergraphSyncWithPrefixLengthMismatch(t *testing.T) {
|
||||
runSyncTest := func(direction string) {
|
||||
t.Run(direction, func(t *testing.T) {
|
||||
// Create fresh databases for this sub-test
|
||||
nodeADB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestnodeA_%s/store", direction)}, 0)
|
||||
nodeADB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestnodeA_%s/store", direction)}}, 0)
|
||||
defer nodeADB.Close()
|
||||
|
||||
nodeBDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestnodeB_%s/store", direction)}, 0)
|
||||
nodeBDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: fmt.Sprintf(".configtestnodeB_%s/store", direction)}}, 0)
|
||||
defer nodeBDB.Close()
|
||||
|
||||
nodeAStore := store.NewPebbleHypergraphStore(
|
||||
@ -2569,7 +2569,7 @@ func TestMainnetBlossomsubFrameReceptionAndHypersync(t *testing.T) {
|
||||
globalFrameBitmask := []byte{0x00, 0x00}
|
||||
|
||||
// Create in-memory hypergraph store for the client
|
||||
clientDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_mainnet_client/store"}, 0)
|
||||
clientDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_mainnet_client/store"}}, 0)
|
||||
defer clientDB.Close()
|
||||
|
||||
clientStore := store.NewPebbleHypergraphStore(
|
||||
@ -3170,7 +3170,7 @@ waitLoop:
|
||||
t.Log("Verifying sync-based repair approach...")
|
||||
|
||||
// Create second in-memory hypergraph
|
||||
repairDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_mainnet_repair/store"}, 0)
|
||||
repairDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_mainnet_repair/store"}}, 0)
|
||||
defer repairDB.Close()
|
||||
|
||||
repairStore := store.NewPebbleHypergraphStore(
|
||||
@ -3401,7 +3401,7 @@ func TestHypergraphSyncWithPagination(t *testing.T) {
|
||||
t.Log("Generated data trees")
|
||||
|
||||
// Create server DB and store
|
||||
serverDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_pagination_server/store"}, 0)
|
||||
serverDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_pagination_server/store"}}, 0)
|
||||
defer serverDB.Close()
|
||||
|
||||
serverStore := store.NewPebbleHypergraphStore(
|
||||
@ -3422,7 +3422,7 @@ func TestHypergraphSyncWithPagination(t *testing.T) {
|
||||
)
|
||||
|
||||
// Create client DB and store
|
||||
clientDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_pagination_client/store"}, 0)
|
||||
clientDB := store.NewPebbleDB(logger, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest_pagination_client/store"}}, 0)
|
||||
defer clientDB.Close()
|
||||
|
||||
clientStore := store.NewPebbleHypergraphStore(
|
||||
|
||||
@ -62,17 +62,21 @@ func (p *PebbleHypergraphStore) NewShardSnapshot(
func(),
error,
) {
memConfig := *p.config
memConfig.InMemoryDONOTUSE = true
memConfig.Path = fmt.Sprintf(
memDBConfig := *p.config
memDBConfig.InMemoryDONOTUSE = true
memDBConfig.Path = fmt.Sprintf(
"memory-shard-%x",
shardKey.L2[:4],
)
// Wrap DBConfig in a minimal Config for NewPebbleDB
memConfig := &config.Config{
DB: &memDBConfig,
}

memDB := NewPebbleDB(p.logger, &memConfig, 0)
memDB := NewPebbleDB(p.logger, memConfig, 0)
managedDB := newManagedKVDB(memDB)
snapshotStore := NewPebbleHypergraphStore(
&memConfig,
&memDBConfig,
managedDB,
p.logger,
p.verenc,
@ -94,6 +98,330 @@ func (p *PebbleHypergraphStore) NewShardSnapshot(
return snapshotStore, release, nil
}

// pebbleDBSnapshot wraps a pebble.Snapshot to implement tries.DBSnapshot.
type pebbleDBSnapshot struct {
snap *pebble.Snapshot
}

func (s *pebbleDBSnapshot) Close() error {
if s.snap == nil {
return nil
}
return s.snap.Close()
}

// NewDBSnapshot creates a point-in-time snapshot of the database.
// This is used to ensure consistency when creating shard snapshots.
func (p *PebbleHypergraphStore) NewDBSnapshot() (tries.DBSnapshot, error) {
if p.pebble == nil {
return nil, errors.New("pebble handle not available for snapshot")
}
snap := p.pebble.NewSnapshot()
return &pebbleDBSnapshot{snap: snap}, nil
}
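// Illustrative usage sketch (not part of this change): take one point-in-time
// DB snapshot and derive each shard snapshot from it, so every shard reflects
// the same committed state. hgStore and shardKey are assumed to be supplied by
// the caller.
//
//	dbSnap, err := hgStore.NewDBSnapshot()
//	if err != nil {
//		return err
//	}
//	defer dbSnap.Close()
//
//	shardStore, release, err := hgStore.NewShardSnapshotFromDBSnapshot(shardKey, dbSnap)
//	if err != nil {
//		return err
//	}
//	defer release()
//	_ = shardStore
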
// NewShardSnapshotFromDBSnapshot creates a shard snapshot using data from
// an existing database snapshot. This ensures the shard snapshot reflects
// the exact state at the time the DB snapshot was taken.
func (p *PebbleHypergraphStore) NewShardSnapshotFromDBSnapshot(
shardKey tries.ShardKey,
dbSnapshot tries.DBSnapshot,
) (
tries.TreeBackingStore,
func(),
error,
) {
pebbleSnap, ok := dbSnapshot.(*pebbleDBSnapshot)
if !ok || pebbleSnap.snap == nil {
return nil, nil, errors.New("invalid database snapshot")
}

memDBConfig := *p.config
memDBConfig.InMemoryDONOTUSE = true
memDBConfig.Path = fmt.Sprintf(
"memory-shard-%x",
shardKey.L2[:4],
)
// Wrap DBConfig in a minimal Config for NewPebbleDB
memConfig := &config.Config{
DB: &memDBConfig,
}

memDB := NewPebbleDB(p.logger, memConfig, 0)
managedDB := newManagedKVDB(memDB)
snapshotStore := NewPebbleHypergraphStore(
&memDBConfig,
managedDB,
p.logger,
p.verenc,
p.prover,
)
snapshotStore.pebble = nil

// Copy data from the pebble snapshot instead of the live DB
if err := p.copyShardDataFromSnapshot(managedDB, shardKey, pebbleSnap.snap); err != nil {
_ = managedDB.Close()
return nil, nil, errors.Wrap(err, "copy shard snapshot from db snapshot")
}

release := func() {
if err := managedDB.Close(); err != nil {
p.logger.Warn("failed to close shard snapshot", zap.Error(err))
}
}

return snapshotStore, release, nil
}

// copyShardDataFromSnapshot copies shard data from a pebble snapshot to the
// destination DB. This is similar to copyShardData but reads from a snapshot
// instead of the live database.
func (p *PebbleHypergraphStore) copyShardDataFromSnapshot(
dst store.KVDB,
shardKey tries.ShardKey,
snap *pebble.Snapshot,
) error {
prefixes := []byte{
VERTEX_ADDS_TREE_NODE,
VERTEX_REMOVES_TREE_NODE,
HYPEREDGE_ADDS_TREE_NODE,
HYPEREDGE_REMOVES_TREE_NODE,
VERTEX_ADDS_TREE_NODE_BY_PATH,
VERTEX_REMOVES_TREE_NODE_BY_PATH,
HYPEREDGE_ADDS_TREE_NODE_BY_PATH,
HYPEREDGE_REMOVES_TREE_NODE_BY_PATH,
VERTEX_ADDS_TREE_ROOT,
VERTEX_REMOVES_TREE_ROOT,
HYPEREDGE_ADDS_TREE_ROOT,
HYPEREDGE_REMOVES_TREE_ROOT,
VERTEX_ADDS_CHANGE_RECORD,
VERTEX_REMOVES_CHANGE_RECORD,
HYPEREDGE_ADDS_CHANGE_RECORD,
HYPEREDGE_REMOVES_CHANGE_RECORD,
HYPERGRAPH_VERTEX_ADDS_SHARD_COMMIT,
HYPERGRAPH_VERTEX_REMOVES_SHARD_COMMIT,
HYPERGRAPH_HYPEREDGE_ADDS_SHARD_COMMIT,
HYPERGRAPH_HYPEREDGE_REMOVES_SHARD_COMMIT,
}

for _, prefix := range prefixes {
if err := p.copyPrefixedRangeFromSnapshot(dst, prefix, shardKey, snap); err != nil {
return err
}
}

if err := p.copyVertexDataForShardFromSnapshot(dst, shardKey, snap); err != nil {
return err
}

if err := p.copyCoveredPrefixFromSnapshot(dst, snap); err != nil {
return err
}

return nil
}

func (p *PebbleHypergraphStore) copyPrefixedRangeFromSnapshot(
dst store.KVDB,
prefix byte,
shardKey tries.ShardKey,
snap *pebble.Snapshot,
) error {
start, end := shardRangeBounds(prefix, shardKey)
iter, err := snap.NewIter(&pebble.IterOptions{
LowerBound: start,
UpperBound: end,
})
if err != nil {
return errors.Wrap(err, "snapshot: iter range from snapshot")
}
defer iter.Close()

for valid := iter.First(); valid; valid = iter.Next() {
key := append([]byte(nil), iter.Key()...)
val := append([]byte(nil), iter.Value()...)
if err := dst.Set(key, val); err != nil {
return errors.Wrap(err, "snapshot: set range value")
}
}

return nil
}

func (p *PebbleHypergraphStore) copyVertexDataForShardFromSnapshot(
dst store.KVDB,
shardKey tries.ShardKey,
snap *pebble.Snapshot,
) error {
sets := []struct {
setType string
phaseType string
}{
{string(hypergraph.VertexAtomType), string(hypergraph.AddsPhaseType)},
{string(hypergraph.VertexAtomType), string(hypergraph.RemovesPhaseType)},
}

vertexKeys := make(map[string]struct{})
for _, cfg := range sets {
// Use snapshot-based iteration
iter, err := p.iterateRawLeavesFromSnapshot(cfg.setType, cfg.phaseType, shardKey, snap)
if err != nil {
return errors.Wrap(err, "snapshot: iterate raw leaves from snapshot")
}
for valid := iter.First(); valid; valid = iter.Next() {
leaf, err := iter.Leaf()
if err != nil || leaf == nil {
continue
}
if len(leaf.UnderlyingData) == 0 {
continue
}
keyStr := string(leaf.Key)
if _, ok := vertexKeys[keyStr]; ok {
continue
}
vertexKeys[keyStr] = struct{}{}
buf := append([]byte(nil), leaf.UnderlyingData...)
if err := dst.Set(hypergraphVertexDataKey(leaf.Key), buf); err != nil {
iter.Close()
return errors.Wrap(err, "snapshot: copy vertex data")
}
}
iter.Close()
}

return nil
}

func (p *PebbleHypergraphStore) copyCoveredPrefixFromSnapshot(
dst store.KVDB,
snap *pebble.Snapshot,
) error {
val, closer, err := snap.Get([]byte{HYPERGRAPH_COVERED_PREFIX})
if err != nil {
if errors.Is(err, pebble.ErrNotFound) {
return nil
}
return errors.Wrap(err, "snapshot: get covered prefix")
}
defer closer.Close()
buf := append([]byte(nil), val...)
return dst.Set([]byte{HYPERGRAPH_COVERED_PREFIX}, buf)
}

// pebbleSnapshotRawLeafIterator iterates over raw leaves from a pebble snapshot.
type pebbleSnapshotRawLeafIterator struct {
iter *pebble.Iterator
shardKey tries.ShardKey
snap *pebble.Snapshot
setType string
db *PebbleHypergraphStore
}

func (p *PebbleHypergraphStore) iterateRawLeavesFromSnapshot(
setType string,
phaseType string,
shardKey tries.ShardKey,
snap *pebble.Snapshot,
) (*pebbleSnapshotRawLeafIterator, error) {
// Determine the key prefix based on set and phase type
var keyPrefix byte
switch hypergraph.AtomType(setType) {
case hypergraph.VertexAtomType:
switch hypergraph.PhaseType(phaseType) {
case hypergraph.AddsPhaseType:
keyPrefix = VERTEX_ADDS_TREE_NODE
case hypergraph.RemovesPhaseType:
keyPrefix = VERTEX_REMOVES_TREE_NODE
default:
return nil, errors.New("unknown phase type")
}
case hypergraph.HyperedgeAtomType:
switch hypergraph.PhaseType(phaseType) {
case hypergraph.AddsPhaseType:
keyPrefix = HYPEREDGE_ADDS_TREE_NODE
case hypergraph.RemovesPhaseType:
keyPrefix = HYPEREDGE_REMOVES_TREE_NODE
default:
return nil, errors.New("unknown phase type")
}
default:
return nil, errors.New("unknown set type")
}

start, end := shardRangeBounds(keyPrefix, shardKey)
iter, err := snap.NewIter(&pebble.IterOptions{
LowerBound: start,
UpperBound: end,
})
if err != nil {
return nil, errors.Wrap(err, "iterate raw leaves from snapshot")
}

return &pebbleSnapshotRawLeafIterator{
iter: iter,
shardKey: shardKey,
snap: snap,
setType: setType,
db: p,
}, nil
}

func (i *pebbleSnapshotRawLeafIterator) First() bool {
return i.iter.First()
}

func (i *pebbleSnapshotRawLeafIterator) Next() bool {
return i.iter.Next()
}

func (i *pebbleSnapshotRawLeafIterator) Close() {
i.iter.Close()
}

func (i *pebbleSnapshotRawLeafIterator) Leaf() (*tries.RawLeafData, error) {
if !i.iter.Valid() {
return nil, nil
}

nodeData := i.iter.Value()
if len(nodeData) == 0 {
return nil, nil
}

// Only process leaf nodes (type byte == TypeLeaf)
if nodeData[0] != tries.TypeLeaf {
return nil, nil
}

leaf, err := tries.DeserializeLeafNode(i.db, bytes.NewReader(nodeData[1:]))
if err != nil {
return nil, err
}

result := &tries.RawLeafData{
Key: slices.Clone(leaf.Key),
Value: slices.Clone(leaf.Value),
HashTarget: slices.Clone(leaf.HashTarget),
Commitment: slices.Clone(leaf.Commitment),
}

if leaf.Size != nil {
result.Size = leaf.Size.FillBytes(make([]byte, 32))
}

// Load vertex data from snapshot if this is a vertex set
if i.setType == string(hypergraph.VertexAtomType) {
dataVal, closer, err := i.snap.Get(hypergraphVertexDataKey(leaf.Key))
if err == nil {
result.UnderlyingData = append([]byte(nil), dataVal...)
closer.Close()
}
}

return result, nil
}

type PebbleVertexDataIterator struct {
i store.Iterator
db *PebbleHypergraphStore

@ -29,7 +29,7 @@ import (

type PebbleDB struct {
db *pebble.DB
config *config.DBConfig
config *config.Config
}

func (p *PebbleDB) DB() *pebble.DB {
@ -38,7 +38,7 @@ func (p *PebbleDB) DB() *pebble.DB {

// pebbleMigrations contains ordered migration steps. New migrations append to
// the end.
var pebbleMigrations = []func(*pebble.Batch, *pebble.DB, *config.DBConfig) error{
var pebbleMigrations = []func(*pebble.Batch, *pebble.DB, *config.Config) error{
migration_2_1_0_4,
migration_2_1_0_5,
migration_2_1_0_8,
@ -92,11 +92,12 @@ var pebbleMigrations = []func(*pebble.Batch, *pebble.DB, *config.DBConfig) error
migration_2_1_0_1816,
migration_2_1_0_1817,
migration_2_1_0_1818,
migration_2_1_0_1819,
}

func NewPebbleDB(
logger *zap.Logger,
config *config.DBConfig,
cfg *config.Config,
coreId uint,
) *PebbleDB {
opts := &pebble.Options{
@ -108,15 +109,15 @@ func NewPebbleDB(
FormatMajorVersion: pebble.FormatNewest,
}

if config.InMemoryDONOTUSE {
if cfg.DB.InMemoryDONOTUSE {
opts.FS = vfs.NewMem()
}

path := config.Path
if coreId > 0 && len(config.WorkerPaths) > int(coreId-1) {
path = config.WorkerPaths[coreId-1]
path := cfg.DB.Path
if coreId > 0 && len(cfg.DB.WorkerPaths) > int(coreId-1) {
path = cfg.DB.WorkerPaths[coreId-1]
} else if coreId > 0 {
path = fmt.Sprintf(config.WorkerPathPrefix, coreId)
path = fmt.Sprintf(cfg.DB.WorkerPathPrefix, coreId)
}

storeType := "store"
@ -124,7 +125,7 @@ func NewPebbleDB(
storeType = "worker store"
}

if _, err := os.Stat(path); os.IsNotExist(err) && !config.InMemoryDONOTUSE {
if _, err := os.Stat(path); os.IsNotExist(err) && !cfg.DB.InMemoryDONOTUSE {
logger.Warn(
fmt.Sprintf("%s not found, creating", storeType),
zap.String("path", path),
@ -149,7 +150,7 @@ func NewPebbleDB(
}

db, err := pebble.Open(path, opts)
if err != nil && shouldAttemptLegacyOpen(err, config.InMemoryDONOTUSE) {
if err != nil && shouldAttemptLegacyOpen(err, cfg.DB.InMemoryDONOTUSE) {
logger.Warn(
fmt.Sprintf(
"failed to open %s with pebble v2, trying legacy open",
@ -193,7 +194,7 @@ func NewPebbleDB(
os.Exit(1)
}

pebbleDB := &PebbleDB{db, config}
pebbleDB := &PebbleDB{db, cfg}
if err := pebbleDB.migrate(logger); err != nil {
logger.Error(
fmt.Sprintf("failed to migrate %s", storeType),
@ -254,7 +255,7 @@ func ensurePebbleLegacyCompatibility(
}

func (p *PebbleDB) migrate(logger *zap.Logger) error {
if p.config.InMemoryDONOTUSE {
if p.config.DB.InMemoryDONOTUSE {
return nil
}

@ -488,7 +489,7 @@ func rightAlign(data []byte, size int) []byte {
|
||||
|
||||
// Resolves all the variations of store issues from any series of upgrade steps
|
||||
// in 2.1.0.1->2.1.0.3
|
||||
func migration_2_1_0_4(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_4(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
// batches don't use this but for backcompat the parameter is required
|
||||
wo := &pebble.WriteOptions{}
|
||||
|
||||
@ -589,138 +590,138 @@ func migration_2_1_0_4(b *pebble.Batch, db *pebble.DB, config *config.DBConfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_5(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_5(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
// We just re-run it again
|
||||
return migration_2_1_0_4(b, db, config)
|
||||
return migration_2_1_0_4(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_8(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_8(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
// these migration entries exist solely to advance migration number so all
|
||||
// nodes are consistent
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_81(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_81(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
// these migration entries exist solely to advance migration number so all
|
||||
// nodes are consistent
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_10(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_10(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
// these migration entries exist solely to advance migration number so all
|
||||
// nodes are consistent
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_11(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_11(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_14(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_14(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_141(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_141(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_142(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_142(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_143(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_143(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_144(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_144(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_145(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_145(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_146(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_146(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_147(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_147(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_148(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_14(b, db, config)
|
||||
func migration_2_1_0_148(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_14(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_149(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_149(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_1410(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_149(b, db, config)
|
||||
func migration_2_1_0_1410(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_149(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_1411(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_149(b, db, config)
|
||||
func migration_2_1_0_1411(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_149(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_15(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_15(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_151(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_151(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_152(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_152(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_153(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_153(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_154(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_154(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_155(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_155(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_156(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_156(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_157(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_157(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_158(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_158(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_159(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
return migration_2_1_0_15(b, db, config)
|
||||
func migration_2_1_0_159(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return migration_2_1_0_15(b, db, cfg)
|
||||
}
|
||||
|
||||
func migration_2_1_0_17(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
|
||||
func migration_2_1_0_17(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func migration_2_1_0_171(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_171(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_172(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_172(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_173(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_173(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_18(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_18(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    // Global shard key: L1={0,0,0}, L2=0xff*32
    globalShardKey := tries.ShardKey{
        L1: [3]byte{},
@ -801,69 +802,69 @@ func migration_2_1_0_18(b *pebble.Batch, db *pebble.DB, config *config.DBConfig)
    return nil
}

func migration_2_1_0_181(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_18(b, db, config)
func migration_2_1_0_181(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_18(b, db, cfg)
}

func migration_2_1_0_182(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_182(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_183(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_183(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_184(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_184(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_185(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_185(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_186(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_186(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_187(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_187(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_188(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_188(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_189(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_189(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return nil
}

func migration_2_1_0_1810(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_189(b, db, config)
func migration_2_1_0_1810(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_189(b, db, cfg)
}

func migration_2_1_0_1811(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_189(b, db, config)
func migration_2_1_0_1811(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_189(b, db, cfg)
}

func migration_2_1_0_1812(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_189(b, db, config)
func migration_2_1_0_1812(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_189(b, db, cfg)
}

func migration_2_1_0_1813(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_189(b, db, config)
func migration_2_1_0_1813(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_189(b, db, cfg)
}

func migration_2_1_0_1814(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_189(b, db, config)
func migration_2_1_0_1814(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_189(b, db, cfg)
}

func migration_2_1_0_1815(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_189(b, db, config)
func migration_2_1_0_1815(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_189(b, db, cfg)
}

// migration_2_1_0_1816 recalculates commitments for the global prover trees
// to fix potential corruption from earlier versions of sync.
func migration_2_1_0_1816(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
func migration_2_1_0_1816(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    // Global prover shard key: L1={0,0,0}, L2=0xff*32
    globalShardKey := tries.ShardKey{
        L1: [3]byte{},
@ -892,8 +893,8 @@ func migration_2_1_0_1816(b *pebble.Batch, db *pebble.DB, config *config.DBConfi
    return nil
}

func migration_2_1_0_1817(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return migration_2_1_0_1816(b, db, config)
func migration_2_1_0_1817(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return migration_2_1_0_1816(b, db, cfg)
}

// migration_2_1_0_1818 repairs corrupted global prover shard tree data by:
@ -903,14 +904,14 @@ func migration_2_1_0_1817(b *pebble.Batch, db *pebble.DB, config *config.DBConfi
// 4. Wiping all tree data for the global prover shard from the actual DB
// 5. Setting up a local gRPC sync server backed by the in-memory hypergraph
// 6. Syncing from the in-memory instance back to the actual DB hypergraph
func migration_2_1_0_1818(b *pebble.Batch, db *pebble.DB, config *config.DBConfig) error {
    return doMigration1818(db, config)
func migration_2_1_0_1818(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    return doMigration1818(db, cfg)
}

// doMigration1818 performs the actual migration work for migration_2_1_0_1818.
// It uses the sync protocol to repair corrupted tree data by syncing to an
// in-memory instance and back.
func doMigration1818(db *pebble.DB, config *config.DBConfig) error {
func doMigration1818(db *pebble.DB, cfg *config.Config) error {
    logger := zap.L()

    // Global prover shard key: L1={0,0,0}, L2=0xff*32
@ -928,7 +929,7 @@ func doMigration1818(db *pebble.DB, config *config.DBConfig) error {

    // Create hypergraph from actual DB
    actualDBWrapper := &PebbleDB{db: db}
    actualStore := NewPebbleHypergraphStore(config, actualDBWrapper, logger, nil, prover)
    actualStore := NewPebbleHypergraphStore(cfg.DB, actualDBWrapper, logger, nil, prover)

    actualHG, err := actualStore.LoadHypergraph(nil, 0)
    if err != nil {
@ -949,7 +950,7 @@ func doMigration1818(db *pebble.DB, config *config.DBConfig) error {
    defer memDB.Close()

    memDBWrapper := &PebbleDB{db: memDB}
    memStore := NewPebbleHypergraphStore(config, memDBWrapper, logger, nil, prover)
    memStore := NewPebbleHypergraphStore(cfg.DB, memDBWrapper, logger, nil, prover)
    memHG, err := memStore.LoadHypergraph(nil, 0)
    if err != nil {
        return errors.Wrap(err, "load in-memory hypergraph")
@ -1060,7 +1061,7 @@ func doMigration1818(db *pebble.DB, config *config.DBConfig) error {
    logger.Info("migration 1818: wiped tree data from actual DB")

    // Reload actual hypergraph after wipe
    actualStore2 := NewPebbleHypergraphStore(config, actualDBWrapper, logger, nil, prover)
    actualStore2 := NewPebbleHypergraphStore(cfg.DB, actualDBWrapper, logger, nil, prover)
    actualHG2, err := actualStore2.LoadHypergraph(nil, 0)
    if err != nil {
        return errors.Wrap(err, "reload actual hypergraph after wipe")
@ -1124,6 +1125,17 @@ func doMigration1818(db *pebble.DB, config *config.DBConfig) error {
    return nil
}

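For reference, the scratch in-memory Pebble instance that doMigration1818 syncs into can be created with Pebble's memory-backed VFS. The lines where memDB is opened are elided by the hunk above, so the following is only a minimal sketch of that pattern, assuming nothing beyond the upstream pebble package; the key/value written at the end is purely illustrative:

package main

import (
    "log"

    "github.com/cockroachdb/pebble"
    "github.com/cockroachdb/pebble/vfs"
)

func main() {
    // Open a throwaway Pebble database entirely in memory. Everything written
    // here is discarded when the process exits, which is what a repair
    // migration wants for its scratch hypergraph.
    memDB, err := pebble.Open("", &pebble.Options{FS: vfs.NewMem()})
    if err != nil {
        log.Fatal(err)
    }
    defer memDB.Close()

    // The migration would load a hypergraph on top of memDB, sync the
    // corrupted shard into it, wipe the on-disk copy, and sync back.
    if err := memDB.Set([]byte("k"), []byte("v"), pebble.Sync); err != nil {
        log.Fatal(err)
    }
}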
// migration_2_1_0_1819 re-runs migration_2_1_0_18 for non-archive mode nodes.
// This ensures that nodes which do not have ArchiveMode enabled will have the
// global shard hypergraph data cleaned up.
func migration_2_1_0_1819(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
    // Only run for non-archive mode nodes
    if cfg.Engine != nil && cfg.Engine.ArchiveMode {
        return nil
    }
    return migration_2_1_0_18(b, db, cfg)
}

// pebbleBatchDB wraps a *pebble.Batch to implement store.KVDB for use in migrations
type pebbleBatchDB struct {
    b *pebble.Batch

@ -62,9 +62,10 @@ const (
    TraversalProofType uint32 = 0x0316
    GlobalProposalType uint32 = 0x0317
    AppShardProposalType uint32 = 0x0318
    AltShardUpdateType uint32 = 0x0319
    TimeoutStateType uint32 = 0x031C
    TimeoutCertificateType uint32 = 0x031D
    AltShardUpdateType uint32 = 0x0319
    ProverSeniorityMergeType uint32 = 0x031A
    TimeoutStateType uint32 = 0x031C
    TimeoutCertificateType uint32 = 0x031D

    // Hypergraph types (0x0400 - 0x04FF)
    HypergraphConfigurationType uint32 = 0x0401

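The new ProverSeniorityMergeType constant slots into the existing 0x03xx message-type range, and it is the 4-byte big-endian prefix that the canonical serializers below write first. As a rough sketch of how a decoder can branch on that prefix (the peekType/dispatch helpers are hypothetical and assume the constants and types defined in this file plus its existing encoding/binary and errors imports):

// peekType returns the 4-byte big-endian type prefix that every canonical
// encoding in this range starts with.
func peekType(data []byte) (uint32, bool) {
    if len(data) < 4 {
        return 0, false
    }
    return binary.BigEndian.Uint32(data[:4]), true
}

// dispatch is a hypothetical example of routing a raw message on its prefix.
func dispatch(data []byte) error {
    t, ok := peekType(data)
    if !ok {
        return errors.New("short message")
    }
    switch t {
    case ProverSeniorityMergeType: // 0x031A, added by this change
        m := &ProverSeniorityMerge{}
        return m.FromCanonicalBytes(data)
    case AltShardUpdateType: // 0x0319
        // ... other handlers elided
    }
    return nil
}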
@ -6139,3 +6139,150 @@ func (g *GlobalAlert) Validate() error {

    return nil
}

// ProverSeniorityMerge serialization methods

func (p *ProverSeniorityMerge) ToCanonicalBytes() ([]byte, error) {
    buf := new(bytes.Buffer)

    // Write type prefix
    if err := binary.Write(buf, binary.BigEndian, ProverSeniorityMergeType); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

    // Write frame_number
    if err := binary.Write(buf, binary.BigEndian, p.FrameNumber); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

    // Write public_key_signature_bls48581
    if p.PublicKeySignatureBls48581 != nil {
        sigBytes, err := p.PublicKeySignatureBls48581.ToCanonicalBytes()
        if err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
        if err := binary.Write(
            buf,
            binary.BigEndian,
            uint32(len(sigBytes)),
        ); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
        if _, err := buf.Write(sigBytes); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
    } else {
        if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
    }

    // Write merge_targets count
    if err := binary.Write(
        buf,
        binary.BigEndian,
        uint32(len(p.MergeTargets)),
    ); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

    // Write each merge target
    for _, mt := range p.MergeTargets {
        mtBytes, err := mt.ToCanonicalBytes()
        if err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
        if err := binary.Write(
            buf,
            binary.BigEndian,
            uint32(len(mtBytes)),
        ); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
        if _, err := buf.Write(mtBytes); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
    }

    return buf.Bytes(), nil
}

func (p *ProverSeniorityMerge) FromCanonicalBytes(data []byte) error {
    buf := bytes.NewBuffer(data)

    // Read and verify type prefix
    var typePrefix uint32
    if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil {
        return errors.Wrap(err, "from canonical bytes")
    }
    if typePrefix != ProverSeniorityMergeType {
        return errors.Wrap(
            errors.New("invalid type prefix"),
            "from canonical bytes",
        )
    }

    // Read frame_number
    if err := binary.Read(buf, binary.BigEndian, &p.FrameNumber); err != nil {
        return errors.Wrap(err, "from canonical bytes")
    }

    // Read public_key_signature_bls48581
    var sigLen uint32
    if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil {
        return errors.Wrap(err, "from canonical bytes")
    }
    if sigLen > 118 {
        return errors.Wrap(
            errors.New("invalid signature length"),
            "from canonical bytes",
        )
    }
    if sigLen > 0 {
        sigBytes := make([]byte, sigLen)
        if _, err := buf.Read(sigBytes); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        p.PublicKeySignatureBls48581 = &BLS48581AddressedSignature{}
        if err := p.PublicKeySignatureBls48581.FromCanonicalBytes(sigBytes); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
    }

    // Read merge_targets count
    var mtCount uint32
    if err := binary.Read(buf, binary.BigEndian, &mtCount); err != nil {
        return errors.Wrap(err, "from canonical bytes")
    }
    if mtCount > 100 {
        return errors.Wrap(
            errors.New("too many merge targets"),
            "from canonical bytes",
        )
    }

    // Read each merge target
    p.MergeTargets = make([]*SeniorityMerge, mtCount)
    for i := uint32(0); i < mtCount; i++ {
        var mtLen uint32
        if err := binary.Read(buf, binary.BigEndian, &mtLen); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        if mtLen > 1000 {
            return errors.Wrap(
                errors.New("invalid merge target length"),
                "from canonical bytes",
            )
        }
        mtBytes := make([]byte, mtLen)
        if _, err := buf.Read(mtBytes); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        p.MergeTargets[i] = &SeniorityMerge{}
        if err := p.MergeTargets[i].FromCanonicalBytes(mtBytes); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
    }

    return nil
}

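A short round-trip sketch of the two methods above. Only the ToCanonicalBytes/FromCanonicalBytes signatures and the FrameNumber/MergeTargets fields come from the diff; the enclosing function and the field values are illustrative:

func exampleSeniorityMergeRoundTrip() error {
    // Build a merge request for the current frame. A real request would also
    // carry a BLS48-581 signature and one SeniorityMerge per old peer key.
    original := &ProverSeniorityMerge{
        FrameNumber:  12345,
        MergeTargets: []*SeniorityMerge{},
    }

    encoded, err := original.ToCanonicalBytes()
    if err != nil {
        return err
    }

    decoded := &ProverSeniorityMerge{}
    if err := decoded.FromCanonicalBytes(encoded); err != nil {
        return err
    }

    if decoded.FrameNumber != original.FrameNumber {
        return errors.New("round trip mismatch")
    }
    return nil
}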
File diff suppressed because it is too large
@ -78,6 +78,18 @@ message ProverReject {
  repeated bytes filters = 4;
}

// ProverSeniorityMerge allows existing provers to claim seniority from their
// old peer keys. This is used as a repair mechanism for provers who joined
// before the seniority merge bug was fixed.
message ProverSeniorityMerge {
  // The frame number when this request is made
  uint64 frame_number = 1;
  // The BLS48-581 signature proving ownership of the prover key
  quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 2;
  // The merge targets containing old peer keys to claim seniority from
  repeated SeniorityMerge merge_targets = 3;
}

// AltShardUpdate allows external entities to maintain their own state trees
// with provable ownership through signature verification. The shard address
// is derived from the poseidon hash of the BLS48-581 public key.
@ -127,6 +139,7 @@ message MessageRequest {
    quilibrium.node.compute.pb.CodeFinalize code_finalize = 24;
    quilibrium.node.global.pb.FrameHeader shard = 25;
    quilibrium.node.global.pb.AltShardUpdate alt_shard_update = 26;
    quilibrium.node.global.pb.ProverSeniorityMerge seniority_merge = 27;
  }
  int64 timestamp = 99;
}

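On the wire, the merge travels as the new seniority_merge arm of the MessageRequest oneof. The generated Go wrapper and oneof field names are not shown in this diff, so the sketch below assumes the standard protoc-gen-go naming (MessageRequest_SeniorityMerge, a Request oneof field) and a hypothetical currentFrame variable:

req := &MessageRequest{
    // Request and MessageRequest_SeniorityMerge are assumed names based on
    // protoc-gen-go conventions for the oneof shown above.
    Request: &MessageRequest_SeniorityMerge{
        SeniorityMerge: &ProverSeniorityMerge{
            FrameNumber: currentFrame,
        },
    },
    Timestamp: time.Now().UnixMilli(),
}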
@ -24,6 +24,12 @@ type ShardKey struct {
    L2 [32]byte
}

// DBSnapshot represents a point-in-time snapshot of the database.
// This is used to ensure consistency when creating shard snapshots.
type DBSnapshot interface {
    io.Closer
}

type ChangeRecord struct {
    Key []byte
    OldValue *VectorCommitmentTree
@ -551,6 +557,18 @@ type TreeBackingStore interface {
    ) ([]byte, error)
    GetRootCommits(frameNumber uint64) (map[ShardKey][][]byte, error)
    NewShardSnapshot(shardKey ShardKey) (TreeBackingStore, func(), error)
    // NewDBSnapshot creates a point-in-time snapshot of the entire database.
    // This is used to ensure consistency when creating shard snapshots - the
    // returned DBSnapshot should be passed to NewShardSnapshotFromDBSnapshot.
    // The caller must call Close() on the returned DBSnapshot when done.
    NewDBSnapshot() (DBSnapshot, error)
    // NewShardSnapshotFromDBSnapshot creates a shard snapshot from an existing
    // database snapshot. This ensures the shard snapshot reflects the exact state
    // at the time the DB snapshot was taken, avoiding race conditions.
    NewShardSnapshotFromDBSnapshot(
        shardKey ShardKey,
        dbSnapshot DBSnapshot,
    ) (TreeBackingStore, func(), error)
    // IterateRawLeaves returns an iterator over all leaf nodes for a given
    // shard and phase set. This bypasses in-memory tree caching and reads
    // directly from the database for raw sync operations.
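A minimal usage sketch of the two new TreeBackingStore methods, following the contract spelled out in their comments; the enclosing function, the store variable, and the error handling are illustrative only:

func snapshotShardConsistently(store TreeBackingStore, shardKey ShardKey) error {
    // Capture one point-in-time view of the whole database...
    dbSnap, err := store.NewDBSnapshot()
    if err != nil {
        return err
    }
    // ...and close it once every shard snapshot derived from it is done.
    defer dbSnap.Close()

    // A shard snapshot built from dbSnap reflects exactly the state at the
    // moment NewDBSnapshot was called, so concurrent writes cannot race it.
    shardStore, release, err := store.NewShardSnapshotFromDBSnapshot(shardKey, dbSnap)
    if err != nil {
        return err
    }
    defer release()

    _ = shardStore // read from the frozen shard view here
    return nil
}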
@ -2043,14 +2061,16 @@ func (t *LazyVectorCommitmentTree) Delete(

    if childBranch, ok := lastChild.(*LazyVectorCommitmentBranchNode); ok {
        // Merge this node's prefix with the child's prefix
        // Note: We do NOT update FullPrefix because children are stored
        // relative to the branch's FullPrefix, and they'd become unreachable
        mergedPrefix := []int{}
        mergedPrefix = append(mergedPrefix, n.Prefix...)
        mergedPrefix = append(mergedPrefix, lastChildIndex)
        mergedPrefix = append(mergedPrefix, childBranch.Prefix...)

        childBranch.Prefix = mergedPrefix
        // Note: We do NOT update FullPrefix because children are stored
        // relative to the branch's FullPrefix. If we updated FullPrefix,
        // child lookups would compute wrong paths and fail.
        // The FullPrefix remains at the old value for child path compatibility.
        childBranch.Commitment = nil

        // Delete the child from its original path to prevent orphan
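A concrete illustration of the prefix merge performed above; the prefix values are made up, only the append pattern mirrors the code:

// Collapsing a branch n with Prefix [2, 7] whose only remaining child sits at
// index 5 and itself has Prefix [1]:
//
//   mergedPrefix = n.Prefix ++ [lastChildIndex] ++ childBranch.Prefix
//                = [2, 7]   ++ [5]              ++ [1]
//                = [2, 7, 5, 1]
//
// childBranch.Prefix becomes [2, 7, 5, 1], childBranch.FullPrefix is left
// untouched so existing children (stored relative to it) stay reachable, and
// the cached Commitment is cleared so it is recomputed on the next access.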