mirror of
https://github.com/QuilibriumNetwork/ceremonyclient.git
synced 2026-02-21 10:27:26 +08:00
v2.1.0.19
This commit is contained in:
parent
92c1f07562
commit
7eeb91a9a2
@ -43,7 +43,7 @@ func FormatVersion(version []byte) string {
|
||||
}
|
||||
|
||||
func GetPatchNumber() byte {
|
||||
return 0x12
|
||||
return 0x13
|
||||
}
|
||||
|
||||
func GetRCNumber() byte {
|
||||
|
||||
@ -44,7 +44,7 @@ var _ BasicResolver = (*Resolver)(nil)
|
||||
|
||||
// NewResolver creates a new Resolver instance with the specified options
|
||||
func NewResolver(opts ...Option) (*Resolver, error) {
|
||||
r := &Resolver{def: net.DefaultResolver}
|
||||
r := &Resolver{def: &net.Resolver{PreferGo: true}}
|
||||
for _, opt := range opts {
|
||||
err := opt(r)
|
||||
if err != nil {
|
||||
|
||||
@ -298,6 +298,11 @@ func (hg *HypergraphCRDT) CommitShard(
|
||||
L2: [32]byte(shardAddress[:32]),
|
||||
}
|
||||
|
||||
txn, err := hg.store.NewTransaction(false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "commit shard")
|
||||
}
|
||||
|
||||
vertexAddSet, vertexRemoveSet := hg.getOrCreateIdSet(
|
||||
shardKey,
|
||||
hg.vertexAdds,
|
||||
@ -306,9 +311,9 @@ func (hg *HypergraphCRDT) CommitShard(
|
||||
hg.getCoveredPrefix(),
|
||||
)
|
||||
vertexAddTree := vertexAddSet.GetTree()
|
||||
vertexAddTree.Commit(nil, false)
|
||||
vertexAddTree.Commit(txn, false)
|
||||
vertexRemoveTree := vertexRemoveSet.GetTree()
|
||||
vertexRemoveTree.Commit(nil, false)
|
||||
vertexRemoveTree.Commit(txn, false)
|
||||
|
||||
path := tries.GetFullPath(shardAddress[:32])
|
||||
for _, p := range shardAddress[32:] {
|
||||
@ -333,25 +338,20 @@ func (hg *HypergraphCRDT) CommitShard(
|
||||
hg.getCoveredPrefix(),
|
||||
)
|
||||
hyperedgeAddTree := hyperedgeAddSet.GetTree()
|
||||
hyperedgeAddTree.Commit(nil, false)
|
||||
hyperedgeAddTree.Commit(txn, false)
|
||||
hyperedgeRemoveTree := hyperedgeRemoveSet.GetTree()
|
||||
hyperedgeRemoveTree.Commit(nil, false)
|
||||
hyperedgeRemoveTree.Commit(txn, false)
|
||||
|
||||
hyperedgeAddNode, err := vertexAddTree.GetByPath(path)
|
||||
hyperedgeAddNode, err := hyperedgeAddTree.GetByPath(path)
|
||||
if err != nil && !strings.Contains(err.Error(), "not found") {
|
||||
return nil, errors.Wrap(err, "commit shard")
|
||||
}
|
||||
|
||||
hyperedgeRemoveNode, err := vertexRemoveTree.GetByPath(path)
|
||||
hyperedgeRemoveNode, err := hyperedgeRemoveTree.GetByPath(path)
|
||||
if err != nil && !strings.Contains(err.Error(), "not found") {
|
||||
return nil, errors.Wrap(err, "commit shard")
|
||||
}
|
||||
|
||||
txn, err := hg.store.NewTransaction(false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "commit shard")
|
||||
}
|
||||
|
||||
vertexAddCommit := make([]byte, 64)
|
||||
if vertexAddNode != nil {
|
||||
switch n := vertexAddNode.(type) {
|
||||
|
||||
@ -177,6 +177,17 @@ func (hg *HypergraphCRDT) Sync(
|
||||
|
||||
path := hg.getCoveredPrefix()
|
||||
|
||||
// Commit tree state through a transaction before sending initial query
|
||||
initTxn, err := hg.store.NewTransaction(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
initCommitment := set.GetTree().Commit(initTxn, false)
|
||||
if err := initTxn.Commit(); err != nil {
|
||||
initTxn.Abort()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send initial query for path
|
||||
sendStart := time.Now()
|
||||
if err := stream.Send(&protobufs.HypergraphComparison{
|
||||
@ -185,7 +196,7 @@ func (hg *HypergraphCRDT) Sync(
|
||||
ShardKey: slices.Concat(shardKey.L1[:], shardKey.L2[:]),
|
||||
PhaseSet: phaseSet,
|
||||
Path: toInt32Slice(path),
|
||||
Commitment: set.GetTree().Commit(nil, false),
|
||||
Commitment: initCommitment,
|
||||
IncludeLeafData: false,
|
||||
ExpectedRoot: expectedRoot,
|
||||
},
|
||||
@ -336,7 +347,15 @@ func (hg *HypergraphCRDT) Sync(
|
||||
|
||||
wg.Wait()
|
||||
|
||||
root = set.GetTree().Commit(nil, false)
|
||||
finalTxn, err := hg.store.NewTransaction(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = set.GetTree().Commit(finalTxn, false)
|
||||
if err := finalTxn.Commit(); err != nil {
|
||||
finalTxn.Abort()
|
||||
return nil, err
|
||||
}
|
||||
hg.logger.Info(
|
||||
"hypergraph root commit",
|
||||
zap.String("root", hex.EncodeToString(root)),
|
||||
|
||||
@ -155,6 +155,7 @@ type AppConsensusEngine struct {
|
||||
globalProverRootVerifiedFrame atomic.Uint64
|
||||
globalProverRootSynced atomic.Bool
|
||||
globalProverSyncInProgress atomic.Bool
|
||||
lastGlobalFrameHeader *protobufs.GlobalFrameHeader // previous frame for deferred root check
|
||||
|
||||
// Genesis initialization
|
||||
genesisInitialized atomic.Bool
|
||||
@ -985,8 +986,22 @@ func (e *AppConsensusEngine) handleGlobalProverRoot(
|
||||
return
|
||||
}
|
||||
|
||||
frameNumber := frame.Header.FrameNumber
|
||||
expectedProverRoot := frame.Header.ProverTreeCommitment
|
||||
// Defer root check by one frame: when frame N arrives, check frame N-1's
|
||||
// root. This matches the GlobalConsensusEngine which checks the parent
|
||||
// frame's root during materialize(N-1), triggered when frame N certifies
|
||||
// frame N-1. By the time frame N arrives, the master has had time to
|
||||
// materialize N-2 (triggered when N-1 arrived), so the worker's tree
|
||||
// should reflect post-materialize(N-2) state — exactly what frame N-1's
|
||||
// ProverTreeCommitment was computed against.
|
||||
prevHeader := e.lastGlobalFrameHeader
|
||||
e.lastGlobalFrameHeader = frame.Header
|
||||
|
||||
if prevHeader == nil {
|
||||
return
|
||||
}
|
||||
|
||||
frameNumber := prevHeader.FrameNumber
|
||||
expectedProverRoot := prevHeader.ProverTreeCommitment
|
||||
|
||||
localRoot, err := e.computeLocalGlobalProverRoot(frameNumber)
|
||||
if err != nil {
|
||||
@ -997,8 +1012,7 @@ func (e *AppConsensusEngine) handleGlobalProverRoot(
|
||||
)
|
||||
e.globalProverRootSynced.Store(false)
|
||||
e.globalProverRootVerifiedFrame.Store(0)
|
||||
// Use blocking hypersync to ensure we're synced before continuing
|
||||
e.performBlockingGlobalHypersync(frame.Header.Prover, expectedProverRoot)
|
||||
e.performBlockingGlobalHypersync(prevHeader.Prover, expectedProverRoot)
|
||||
return
|
||||
}
|
||||
|
||||
@ -1015,8 +1029,7 @@ func (e *AppConsensusEngine) handleGlobalProverRoot(
|
||||
)
|
||||
e.globalProverRootSynced.Store(false)
|
||||
e.globalProverRootVerifiedFrame.Store(0)
|
||||
// Use blocking hypersync to ensure we're synced before continuing
|
||||
e.performBlockingGlobalHypersync(frame.Header.Prover, expectedProverRoot)
|
||||
e.performBlockingGlobalHypersync(prevHeader.Prover, expectedProverRoot)
|
||||
return
|
||||
}
|
||||
|
||||
@ -1162,8 +1175,11 @@ func (e *AppConsensusEngine) performBlockingGlobalHypersync(proposer []byte, exp
|
||||
)
|
||||
}
|
||||
|
||||
e.globalProverRootSynced.Store(true)
|
||||
e.logger.Info("blocking global hypersync completed")
|
||||
// Don't unconditionally set synced=true. Commit(N-1) is cached with the
|
||||
// pre-sync root, so we can't re-verify here. The next frame's deferred
|
||||
// check will call Commit(N) fresh and verify convergence — matching the
|
||||
// global engine's pattern where convergence happens on the next materialize.
|
||||
e.logger.Info("blocking global hypersync completed, convergence will be verified on next frame")
|
||||
}
|
||||
|
||||
func (e *AppConsensusEngine) GetFrame() *protobufs.AppShardFrame {
|
||||
|
||||
@ -295,11 +295,17 @@ func (p *GlobalLeaderProvider) ProveNextState(
|
||||
|
||||
requestRoot := requestTree.Commit(p.engine.inclusionProver, false)
|
||||
|
||||
// Copy shared state under lock to avoid data race with materialize()
|
||||
p.engine.shardCommitmentMu.Lock()
|
||||
shardCommitments := p.engine.shardCommitments
|
||||
proverRoot := p.engine.proverRoot
|
||||
p.engine.shardCommitmentMu.Unlock()
|
||||
|
||||
// Prove the global frame header
|
||||
newHeader, err := p.engine.frameProver.ProveGlobalFrameHeader(
|
||||
(*prior).Header,
|
||||
p.engine.shardCommitments,
|
||||
p.engine.proverRoot,
|
||||
shardCommitments,
|
||||
proverRoot,
|
||||
requestRoot,
|
||||
signer,
|
||||
timestamp,
|
||||
|
||||
@ -46,7 +46,12 @@ func (e *GlobalConsensusEngine) ensureCoverageThresholds() {
|
||||
|
||||
// triggerCoverageCheckAsync starts a coverage check in a goroutine if one is
|
||||
// not already in progress. This prevents blocking the event processing loop.
|
||||
func (e *GlobalConsensusEngine) triggerCoverageCheckAsync(frameNumber uint64) {
|
||||
// frameProver is the address of the prover who produced the triggering frame;
|
||||
// only that prover will emit split/merge messages.
|
||||
func (e *GlobalConsensusEngine) triggerCoverageCheckAsync(
|
||||
frameNumber uint64,
|
||||
frameProver []byte,
|
||||
) {
|
||||
// Skip if a coverage check is already in progress
|
||||
if !e.coverageCheckInProgress.CompareAndSwap(false, true) {
|
||||
e.logger.Debug(
|
||||
@ -59,14 +64,18 @@ func (e *GlobalConsensusEngine) triggerCoverageCheckAsync(frameNumber uint64) {
|
||||
go func() {
|
||||
defer e.coverageCheckInProgress.Store(false)
|
||||
|
||||
if err := e.checkShardCoverage(frameNumber); err != nil {
|
||||
if err := e.checkShardCoverage(frameNumber, frameProver); err != nil {
|
||||
e.logger.Error("failed to check shard coverage", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// checkShardCoverage verifies coverage levels for all active shards
|
||||
func (e *GlobalConsensusEngine) checkShardCoverage(frameNumber uint64) error {
|
||||
// checkShardCoverage verifies coverage levels for all active shards.
|
||||
// frameProver is the address of the prover who produced the triggering frame.
|
||||
func (e *GlobalConsensusEngine) checkShardCoverage(
|
||||
frameNumber uint64,
|
||||
frameProver []byte,
|
||||
) error {
|
||||
e.ensureCoverageThresholds()
|
||||
|
||||
// Get shard coverage information from prover registry
|
||||
@ -218,13 +227,13 @@ func (e *GlobalConsensusEngine) checkShardCoverage(frameNumber uint64) error {
|
||||
|
||||
// Check for high coverage (potential split)
|
||||
if proverCount > maxProvers {
|
||||
e.handleHighCoverage([]byte(shardAddress), coverage, maxProvers)
|
||||
e.handleHighCoverage([]byte(shardAddress), coverage, maxProvers, frameProver)
|
||||
}
|
||||
}
|
||||
|
||||
// Emit a single bulk merge event if there are any merge-eligible shards
|
||||
if len(allMergeGroups) > 0 {
|
||||
e.emitBulkMergeEvent(allMergeGroups)
|
||||
e.emitBulkMergeEvent(allMergeGroups, frameProver)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -344,6 +353,7 @@ func (e *GlobalConsensusEngine) handleHighCoverage(
|
||||
shardAddress []byte,
|
||||
coverage *ShardCoverage,
|
||||
maxProvers uint64,
|
||||
frameProver []byte,
|
||||
) {
|
||||
addressLen := len(shardAddress)
|
||||
|
||||
@ -369,6 +379,7 @@ func (e *GlobalConsensusEngine) handleHighCoverage(
|
||||
ProverCount: coverage.ProverCount,
|
||||
AttestedStorage: coverage.AttestedStorage,
|
||||
ProposedShards: proposedShards,
|
||||
FrameProver: frameProver,
|
||||
})
|
||||
} else {
|
||||
// Case 3.a.ii: No space to split, do nothing
|
||||
|
||||
@ -82,7 +82,10 @@ func (e *GlobalConsensusEngine) eventDistributorLoop(
|
||||
e.flushDeferredGlobalMessages(data.Frame.GetRank() + 1)
|
||||
|
||||
// Check shard coverage asynchronously to avoid blocking event processing
|
||||
e.triggerCoverageCheckAsync(data.Frame.Header.FrameNumber)
|
||||
e.triggerCoverageCheckAsync(
|
||||
data.Frame.Header.FrameNumber,
|
||||
data.Frame.Header.Prover,
|
||||
)
|
||||
|
||||
// Update global coordination metrics
|
||||
globalCoordinationTotal.Inc()
|
||||
@ -266,6 +269,16 @@ func (e *GlobalConsensusEngine) eventDistributorLoop(
|
||||
}()
|
||||
}
|
||||
|
||||
case typesconsensus.ControlEventShardSplitEligible:
|
||||
if data, ok := event.Data.(*typesconsensus.ShardSplitEventData); ok {
|
||||
e.handleShardSplitEvent(data)
|
||||
}
|
||||
|
||||
case typesconsensus.ControlEventShardMergeEligible:
|
||||
if data, ok := event.Data.(*typesconsensus.BulkShardMergeEventData); ok {
|
||||
e.handleShardMergeEvent(data)
|
||||
}
|
||||
|
||||
default:
|
||||
e.logger.Debug(
|
||||
"received unhandled event type",
|
||||
@ -306,6 +319,7 @@ func (e *GlobalConsensusEngine) emitCoverageEvent(
|
||||
|
||||
func (e *GlobalConsensusEngine) emitBulkMergeEvent(
|
||||
mergeGroups []typesconsensus.ShardMergeEventData,
|
||||
frameProver []byte,
|
||||
) {
|
||||
if len(mergeGroups) == 0 {
|
||||
return
|
||||
@ -314,6 +328,7 @@ func (e *GlobalConsensusEngine) emitBulkMergeEvent(
|
||||
// Combine all merge groups into a single bulk event
|
||||
data := &typesconsensus.BulkShardMergeEventData{
|
||||
MergeGroups: mergeGroups,
|
||||
FrameProver: frameProver,
|
||||
}
|
||||
|
||||
event := typesconsensus.ControlEvent{
|
||||
@ -370,6 +385,156 @@ func (e *GlobalConsensusEngine) emitAlertEvent(alertMessage string) {
|
||||
e.logger.Info("emitted alert message")
|
||||
}
|
||||
|
||||
const shardActionCooldownFrames = 360
|
||||
|
||||
func (e *GlobalConsensusEngine) handleShardSplitEvent(
|
||||
data *typesconsensus.ShardSplitEventData,
|
||||
) {
|
||||
// Only the prover who produced the triggering frame should emit
|
||||
if !bytes.Equal(data.FrameProver, e.getProverAddress()) {
|
||||
return
|
||||
}
|
||||
|
||||
frameNumber := e.lastObservedFrame.Load()
|
||||
if frameNumber == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
addrKey := string(data.ShardAddress)
|
||||
e.lastShardActionFrameMu.Lock()
|
||||
if last, ok := e.lastShardActionFrame[addrKey]; ok &&
|
||||
frameNumber-last < shardActionCooldownFrames {
|
||||
e.lastShardActionFrameMu.Unlock()
|
||||
e.logger.Debug(
|
||||
"skipping shard split, cooldown active",
|
||||
zap.String("shard_address", hex.EncodeToString(data.ShardAddress)),
|
||||
zap.Uint64("last_action_frame", last),
|
||||
zap.Uint64("current_frame", frameNumber),
|
||||
)
|
||||
return
|
||||
}
|
||||
e.lastShardActionFrame[addrKey] = frameNumber
|
||||
e.lastShardActionFrameMu.Unlock()
|
||||
|
||||
op := globalintrinsics.NewShardSplitOp(
|
||||
data.ShardAddress,
|
||||
data.ProposedShards,
|
||||
e.keyManager,
|
||||
e.shardsStore,
|
||||
e.proverRegistry,
|
||||
)
|
||||
|
||||
if err := op.Prove(frameNumber); err != nil {
|
||||
e.logger.Error(
|
||||
"failed to prove shard split",
|
||||
zap.Error(err),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
splitBytes, err := op.ToRequestBytes()
|
||||
if err != nil {
|
||||
e.logger.Error(
|
||||
"failed to serialize shard split",
|
||||
zap.Error(err),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
if err := e.pubsub.PublishToBitmask(
|
||||
GLOBAL_PROVER_BITMASK,
|
||||
splitBytes,
|
||||
); err != nil {
|
||||
e.logger.Error("failed to publish shard split", zap.Error(err))
|
||||
} else {
|
||||
e.logger.Info(
|
||||
"published shard split",
|
||||
zap.String("shard_address", hex.EncodeToString(data.ShardAddress)),
|
||||
zap.Int("proposed_shards", len(data.ProposedShards)),
|
||||
zap.Uint64("frame_number", frameNumber),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *GlobalConsensusEngine) handleShardMergeEvent(
|
||||
data *typesconsensus.BulkShardMergeEventData,
|
||||
) {
|
||||
// Only the prover who produced the triggering frame should emit
|
||||
if !bytes.Equal(data.FrameProver, e.getProverAddress()) {
|
||||
return
|
||||
}
|
||||
|
||||
frameNumber := e.lastObservedFrame.Load()
|
||||
if frameNumber == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, group := range data.MergeGroups {
|
||||
if len(group.ShardAddresses) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use first shard's first 32 bytes as parent address
|
||||
parentAddress := group.ShardAddresses[0][:32]
|
||||
|
||||
// Check cooldown for the parent address
|
||||
parentKey := string(parentAddress)
|
||||
e.lastShardActionFrameMu.Lock()
|
||||
if last, ok := e.lastShardActionFrame[parentKey]; ok &&
|
||||
frameNumber-last < shardActionCooldownFrames {
|
||||
e.lastShardActionFrameMu.Unlock()
|
||||
e.logger.Debug(
|
||||
"skipping shard merge, cooldown active",
|
||||
zap.String("parent_address", hex.EncodeToString(parentAddress)),
|
||||
zap.Uint64("last_action_frame", last),
|
||||
zap.Uint64("current_frame", frameNumber),
|
||||
)
|
||||
continue
|
||||
}
|
||||
e.lastShardActionFrame[parentKey] = frameNumber
|
||||
e.lastShardActionFrameMu.Unlock()
|
||||
|
||||
op := globalintrinsics.NewShardMergeOp(
|
||||
group.ShardAddresses,
|
||||
parentAddress,
|
||||
e.keyManager,
|
||||
e.shardsStore,
|
||||
e.proverRegistry,
|
||||
)
|
||||
|
||||
if err := op.Prove(frameNumber); err != nil {
|
||||
e.logger.Error(
|
||||
"failed to prove shard merge",
|
||||
zap.Error(err),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
mergeBytes, err := op.ToRequestBytes()
|
||||
if err != nil {
|
||||
e.logger.Error(
|
||||
"failed to serialize shard merge",
|
||||
zap.Error(err),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := e.pubsub.PublishToBitmask(
|
||||
GLOBAL_PROVER_BITMASK,
|
||||
mergeBytes,
|
||||
); err != nil {
|
||||
e.logger.Error("failed to publish shard merge", zap.Error(err))
|
||||
} else {
|
||||
e.logger.Info(
|
||||
"published shard merge",
|
||||
zap.String("parent_address", hex.EncodeToString(parentAddress)),
|
||||
zap.Int("shard_count", len(group.ShardAddresses)),
|
||||
zap.Uint64("frame_number", frameNumber),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *GlobalConsensusEngine) estimateSeniorityFromConfig() uint64 {
|
||||
peerIds := []string{}
|
||||
peerIds = append(peerIds, peer.ID(e.pubsub.GetPeerID()).String())
|
||||
|
||||
@ -200,8 +200,10 @@ type GlobalConsensusEngine struct {
|
||||
activeProveRanksMu sync.Mutex
|
||||
appFrameStore map[string]*protobufs.AppShardFrame
|
||||
appFrameStoreMu sync.RWMutex
|
||||
lowCoverageStreak map[string]*coverageStreak
|
||||
proverOnlyMode atomic.Bool
|
||||
lowCoverageStreak map[string]*coverageStreak
|
||||
proverOnlyMode atomic.Bool
|
||||
lastShardActionFrame map[string]uint64
|
||||
lastShardActionFrameMu sync.Mutex
|
||||
coverageCheckInProgress atomic.Bool
|
||||
peerInfoDigestCache map[string]struct{}
|
||||
peerInfoDigestCacheMu sync.Mutex
|
||||
@ -337,6 +339,7 @@ func NewGlobalConsensusEngine(
|
||||
currentDifficulty: config.Engine.Difficulty,
|
||||
lastProvenFrameTime: time.Now(),
|
||||
blacklistMap: make(map[string]bool),
|
||||
lastShardActionFrame: make(map[string]uint64),
|
||||
peerInfoDigestCache: make(map[string]struct{}),
|
||||
keyRegistryDigestCache: make(map[string]struct{}),
|
||||
peerAuthCache: make(map[string]time.Time),
|
||||
|
||||
@ -1722,7 +1722,10 @@ func (e *GlobalConsensusEngine) addCertifiedState(
|
||||
}
|
||||
|
||||
// Trigger coverage check asynchronously to avoid blocking message processing
|
||||
e.triggerCoverageCheckAsync(parent.State.GetFrameNumber())
|
||||
e.triggerCoverageCheckAsync(
|
||||
parent.State.GetFrameNumber(),
|
||||
parent.State.Header.Prover,
|
||||
)
|
||||
}
|
||||
|
||||
func (e *GlobalConsensusEngine) handleProposal(message *pb.Message) {
|
||||
|
||||
@ -172,7 +172,6 @@ func (e *GlobalConsensusEngine) subscribeToProverMessages() error {
|
||||
GLOBAL_PROVER_BITMASK,
|
||||
func(message *pb.Message) error {
|
||||
if e.config.P2P.Network != 99 && !e.config.Engine.ArchiveMode {
|
||||
e.logger.Debug("dropping prover message, not in archive mode")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -208,7 +208,9 @@ func (e *GlobalExecutionEngine) validateBundle(
|
||||
op.GetKick() != nil ||
|
||||
op.GetUpdate() != nil ||
|
||||
op.GetShard() != nil ||
|
||||
op.GetSeniorityMerge() != nil
|
||||
op.GetSeniorityMerge() != nil ||
|
||||
op.GetShardSplit() != nil ||
|
||||
op.GetShardMerge() != nil
|
||||
|
||||
if !isGlobalOp {
|
||||
if e.config.Network == 0 &&
|
||||
@ -526,6 +528,7 @@ func (e *GlobalExecutionEngine) tryGetIntrinsic(address []byte) (
|
||||
e.rewardIssuance,
|
||||
e.proverRegistry,
|
||||
e.blsConstructor,
|
||||
e.shardsStore,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "try get intrinsic")
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"source.quilibrium.com/quilibrium/monorepo/protobufs"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/keys"
|
||||
@ -671,6 +672,96 @@ func (p *ProverSeniorityMerge) ToProtobuf() *protobufs.ProverSeniorityMerge {
|
||||
}
|
||||
}
|
||||
|
||||
// ShardSplitFromProtobuf converts a protobuf ShardSplit to intrinsics
|
||||
func ShardSplitFromProtobuf(
|
||||
pb *protobufs.ShardSplit,
|
||||
hg hypergraph.Hypergraph,
|
||||
keyManager keys.KeyManager,
|
||||
shardsStore store.ShardsStore,
|
||||
proverRegistry consensus.ProverRegistry,
|
||||
) (*ShardSplitOp, error) {
|
||||
if pb == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
pubKeySig, err := BLS48581AddressedSignatureFromProtobuf(
|
||||
pb.PublicKeySignatureBls48581,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "shard split from protobuf")
|
||||
}
|
||||
|
||||
return &ShardSplitOp{
|
||||
ShardAddress: pb.ShardAddress,
|
||||
ProposedShards: pb.ProposedShards,
|
||||
FrameNumber: pb.FrameNumber,
|
||||
PublicKeySignatureBLS48581: *pubKeySig,
|
||||
hypergraph: hg,
|
||||
keyManager: keyManager,
|
||||
shardsStore: shardsStore,
|
||||
proverRegistry: proverRegistry,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProtobuf converts an intrinsics ShardSplitOp to protobuf
|
||||
func (op *ShardSplitOp) ToProtobuf() *protobufs.ShardSplit {
|
||||
if op == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &protobufs.ShardSplit{
|
||||
ShardAddress: op.ShardAddress,
|
||||
ProposedShards: op.ProposedShards,
|
||||
FrameNumber: op.FrameNumber,
|
||||
PublicKeySignatureBls48581: op.PublicKeySignatureBLS48581.ToProtobuf(),
|
||||
}
|
||||
}
|
||||
|
||||
// ShardMergeFromProtobuf converts a protobuf ShardMerge to intrinsics
|
||||
func ShardMergeFromProtobuf(
|
||||
pb *protobufs.ShardMerge,
|
||||
hg hypergraph.Hypergraph,
|
||||
keyManager keys.KeyManager,
|
||||
shardsStore store.ShardsStore,
|
||||
proverRegistry consensus.ProverRegistry,
|
||||
) (*ShardMergeOp, error) {
|
||||
if pb == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
pubKeySig, err := BLS48581AddressedSignatureFromProtobuf(
|
||||
pb.PublicKeySignatureBls48581,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "shard merge from protobuf")
|
||||
}
|
||||
|
||||
return &ShardMergeOp{
|
||||
ShardAddresses: pb.ShardAddresses,
|
||||
ParentAddress: pb.ParentAddress,
|
||||
FrameNumber: pb.FrameNumber,
|
||||
PublicKeySignatureBLS48581: *pubKeySig,
|
||||
hypergraph: hg,
|
||||
keyManager: keyManager,
|
||||
shardsStore: shardsStore,
|
||||
proverRegistry: proverRegistry,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProtobuf converts an intrinsics ShardMergeOp to protobuf
|
||||
func (op *ShardMergeOp) ToProtobuf() *protobufs.ShardMerge {
|
||||
if op == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &protobufs.ShardMerge{
|
||||
ShardAddresses: op.ShardAddresses,
|
||||
ParentAddress: op.ParentAddress,
|
||||
FrameNumber: op.FrameNumber,
|
||||
PublicKeySignatureBls48581: op.PublicKeySignatureBLS48581.ToProtobuf(),
|
||||
}
|
||||
}
|
||||
|
||||
// FromProtobuf converts a protobuf MessageRequest to intrinsics types
|
||||
func GlobalRequestFromProtobuf(
|
||||
pb *protobufs.MessageRequest,
|
||||
|
||||
@ -38,6 +38,7 @@ type GlobalIntrinsic struct {
|
||||
rewardIssuance consensus.RewardIssuance
|
||||
proverRegistry consensus.ProverRegistry
|
||||
blsConstructor crypto.BlsConstructor
|
||||
shardsStore store.ShardsStore
|
||||
}
|
||||
|
||||
var GLOBAL_RDF_SCHEMA = `BASE <https://types.quilibrium.com/schema-repository/>
|
||||
@ -730,6 +731,108 @@ func (a *GlobalIntrinsic) Validate(
|
||||
).Inc()
|
||||
return nil
|
||||
|
||||
case protobufs.ShardSplitType:
|
||||
pb := &protobufs.ShardSplit{}
|
||||
if err := pb.FromCanonicalBytes(input); err != nil {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return errors.Wrap(err, "validate")
|
||||
}
|
||||
|
||||
op, err := ShardSplitFromProtobuf(
|
||||
pb,
|
||||
a.hypergraph,
|
||||
a.keyManager,
|
||||
a.shardsStore,
|
||||
a.proverRegistry,
|
||||
)
|
||||
if err != nil {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return errors.Wrap(err, "validate")
|
||||
}
|
||||
|
||||
valid, err := op.Verify(frameNumber)
|
||||
if err != nil {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return errors.Wrap(err, "validate")
|
||||
}
|
||||
|
||||
if !valid {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return errors.Wrap(
|
||||
errors.New("invalid shard split"),
|
||||
"validate",
|
||||
)
|
||||
}
|
||||
|
||||
observability.ValidateTotal.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil
|
||||
|
||||
case protobufs.ShardMergeType:
|
||||
pb := &protobufs.ShardMerge{}
|
||||
if err := pb.FromCanonicalBytes(input); err != nil {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return errors.Wrap(err, "validate")
|
||||
}
|
||||
|
||||
op, err := ShardMergeFromProtobuf(
|
||||
pb,
|
||||
a.hypergraph,
|
||||
a.keyManager,
|
||||
a.shardsStore,
|
||||
a.proverRegistry,
|
||||
)
|
||||
if err != nil {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return errors.Wrap(err, "validate")
|
||||
}
|
||||
|
||||
valid, err := op.Verify(frameNumber)
|
||||
if err != nil {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return errors.Wrap(err, "validate")
|
||||
}
|
||||
|
||||
if !valid {
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return errors.Wrap(
|
||||
errors.New("invalid shard merge"),
|
||||
"validate",
|
||||
)
|
||||
}
|
||||
|
||||
observability.ValidateTotal.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return nil
|
||||
|
||||
default:
|
||||
observability.ValidateErrors.WithLabelValues(
|
||||
"global",
|
||||
@ -1268,6 +1371,110 @@ func (a *GlobalIntrinsic) InvokeStep(
|
||||
).Inc()
|
||||
return resultState, nil
|
||||
|
||||
case protobufs.ShardSplitType:
|
||||
opTimer := prometheus.NewTimer(
|
||||
observability.OperationDuration.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
),
|
||||
)
|
||||
defer opTimer.ObserveDuration()
|
||||
|
||||
pb := &protobufs.ShardSplit{}
|
||||
if err := pb.FromCanonicalBytes(input); err != nil {
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, errors.Wrap(err, "invoke step")
|
||||
}
|
||||
|
||||
op, err := ShardSplitFromProtobuf(
|
||||
pb,
|
||||
a.hypergraph,
|
||||
a.keyManager,
|
||||
a.shardsStore,
|
||||
a.proverRegistry,
|
||||
)
|
||||
if err != nil {
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, errors.Wrap(err, "invoke step")
|
||||
}
|
||||
|
||||
matTimer := prometheus.NewTimer(
|
||||
observability.MaterializeDuration.WithLabelValues("global"),
|
||||
)
|
||||
resultState, matErr := op.Materialize(frameNumber, state)
|
||||
matTimer.ObserveDuration()
|
||||
if matErr != nil {
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, errors.Wrap(matErr, "invoke step")
|
||||
}
|
||||
|
||||
observability.InvokeStepTotal.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return resultState, nil
|
||||
|
||||
case protobufs.ShardMergeType:
|
||||
opTimer := prometheus.NewTimer(
|
||||
observability.OperationDuration.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
),
|
||||
)
|
||||
defer opTimer.ObserveDuration()
|
||||
|
||||
pb := &protobufs.ShardMerge{}
|
||||
if err := pb.FromCanonicalBytes(input); err != nil {
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return nil, errors.Wrap(err, "invoke step")
|
||||
}
|
||||
|
||||
op, err := ShardMergeFromProtobuf(
|
||||
pb,
|
||||
a.hypergraph,
|
||||
a.keyManager,
|
||||
a.shardsStore,
|
||||
a.proverRegistry,
|
||||
)
|
||||
if err != nil {
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return nil, errors.Wrap(err, "invoke step")
|
||||
}
|
||||
|
||||
matTimer := prometheus.NewTimer(
|
||||
observability.MaterializeDuration.WithLabelValues("global"),
|
||||
)
|
||||
resultState, matErr := op.Materialize(frameNumber, state)
|
||||
matTimer.ObserveDuration()
|
||||
if matErr != nil {
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return nil, errors.Wrap(matErr, "invoke step")
|
||||
}
|
||||
|
||||
observability.InvokeStepTotal.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
return resultState, nil
|
||||
|
||||
default:
|
||||
observability.InvokeStepErrors.WithLabelValues(
|
||||
"global",
|
||||
@ -1397,6 +1604,28 @@ func (a *GlobalIntrinsic) Lock(
|
||||
"prover_seniority_merge",
|
||||
).Inc()
|
||||
|
||||
case protobufs.ShardSplitType:
|
||||
reads, writes, err = a.tryLockShardSplit(frameNumber, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
observability.LockTotal.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
|
||||
case protobufs.ShardMergeType:
|
||||
reads, writes, err = a.tryLockShardMerge(frameNumber, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
observability.LockTotal.WithLabelValues(
|
||||
"global",
|
||||
"shard_merge",
|
||||
).Inc()
|
||||
|
||||
default:
|
||||
observability.LockErrors.WithLabelValues(
|
||||
"global",
|
||||
@ -1914,6 +2143,112 @@ func (a *GlobalIntrinsic) tryLockSeniorityMerge(
|
||||
return reads, writes, nil
|
||||
}
|
||||
|
||||
func (a *GlobalIntrinsic) tryLockShardSplit(
|
||||
frameNumber uint64,
|
||||
input []byte,
|
||||
) (
|
||||
[][]byte,
|
||||
[][]byte,
|
||||
error,
|
||||
) {
|
||||
pb := &protobufs.ShardSplit{}
|
||||
if err := pb.FromCanonicalBytes(input); err != nil {
|
||||
observability.LockErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, nil, errors.Wrap(err, "lock")
|
||||
}
|
||||
|
||||
op, err := ShardSplitFromProtobuf(
|
||||
pb,
|
||||
a.hypergraph,
|
||||
a.keyManager,
|
||||
a.shardsStore,
|
||||
a.proverRegistry,
|
||||
)
|
||||
if err != nil {
|
||||
observability.LockErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, nil, errors.Wrap(err, "lock")
|
||||
}
|
||||
|
||||
reads, err := op.GetReadAddresses(frameNumber)
|
||||
if err != nil {
|
||||
observability.LockErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, nil, errors.Wrap(err, "lock")
|
||||
}
|
||||
|
||||
writes, err := op.GetWriteAddresses(frameNumber)
|
||||
if err != nil {
|
||||
observability.LockErrors.WithLabelValues(
|
||||
"global",
|
||||
"shard_split",
|
||||
).Inc()
|
||||
return nil, nil, errors.Wrap(err, "lock")
|
||||
}
|
||||
|
||||
return reads, writes, nil
|
||||
}
|
||||
|
||||
// tryLockShardMerge decodes a canonical ShardMerge payload and derives the
// read and write address sets it needs locked for the given frame. On any
// decode or address-derivation failure the global "shard_merge" lock error
// counter is incremented and the wrapped error is returned.
func (a *GlobalIntrinsic) tryLockShardMerge(
	frameNumber uint64,
	input []byte,
) (
	[][]byte,
	[][]byte,
	error,
) {
	// Decode the raw request bytes into the protobuf form.
	pb := &protobufs.ShardMerge{}
	if err := pb.FromCanonicalBytes(input); err != nil {
		observability.LockErrors.WithLabelValues(
			"global",
			"shard_merge",
		).Inc()
		return nil, nil, errors.Wrap(err, "lock")
	}

	// Rehydrate the operation with the intrinsic's dependencies so it can
	// compute its address sets.
	op, err := ShardMergeFromProtobuf(
		pb,
		a.hypergraph,
		a.keyManager,
		a.shardsStore,
		a.proverRegistry,
	)
	if err != nil {
		observability.LockErrors.WithLabelValues(
			"global",
			"shard_merge",
		).Inc()
		return nil, nil, errors.Wrap(err, "lock")
	}

	reads, err := op.GetReadAddresses(frameNumber)
	if err != nil {
		observability.LockErrors.WithLabelValues(
			"global",
			"shard_merge",
		).Inc()
		return nil, nil, errors.Wrap(err, "lock")
	}

	writes, err := op.GetWriteAddresses(frameNumber)
	if err != nil {
		observability.LockErrors.WithLabelValues(
			"global",
			"shard_merge",
		).Inc()
		return nil, nil, errors.Wrap(err, "lock")
	}

	return reads, writes, nil
}
|
||||
|
||||
// LoadGlobalIntrinsic loads the global intrinsic from the global intrinsic
|
||||
// address. The global intrinsic is implicitly deployed and always exists at the
|
||||
// global address.
|
||||
@ -1928,6 +2263,7 @@ func LoadGlobalIntrinsic(
|
||||
rewardIssuance consensus.RewardIssuance,
|
||||
proverRegistry consensus.ProverRegistry,
|
||||
blsConstructor crypto.BlsConstructor,
|
||||
shardsStore store.ShardsStore,
|
||||
) (*GlobalIntrinsic, error) {
|
||||
// Verify the address is the global intrinsic address
|
||||
if !bytes.Equal(address, intrinsics.GLOBAL_INTRINSIC_ADDRESS[:]) {
|
||||
@ -1956,6 +2292,7 @@ func LoadGlobalIntrinsic(
|
||||
rewardIssuance: rewardIssuance,
|
||||
proverRegistry: proverRegistry,
|
||||
blsConstructor: blsConstructor,
|
||||
shardsStore: shardsStore,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@ -528,10 +528,14 @@ func (p *ProverJoin) Prove(frameNumber uint64) error {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
// Set the public key before signing merge targets, since merge target
|
||||
// signatures are over the BLS public key and Verify() checks against it.
|
||||
blsPublicKey := prover.Public().([]byte)
|
||||
|
||||
for _, mt := range p.MergeTargets {
|
||||
if mt.signer != nil {
|
||||
mt.Signature, err = mt.signer.SignWithDomain(
|
||||
p.PublicKeySignatureBLS48581.PublicKey,
|
||||
blsPublicKey,
|
||||
[]byte("PROVER_JOIN_MERGE"),
|
||||
)
|
||||
if err != nil {
|
||||
@ -573,7 +577,7 @@ func (p *ProverJoin) Prove(frameNumber uint64) error {
|
||||
// Create the proof of possession signature over the public key with the POP
|
||||
// domain
|
||||
popSignature, err := prover.SignWithDomain(
|
||||
prover.Public().([]byte),
|
||||
blsPublicKey,
|
||||
popDomain,
|
||||
)
|
||||
if err != nil {
|
||||
@ -583,7 +587,7 @@ func (p *ProverJoin) Prove(frameNumber uint64) error {
|
||||
// Create the BLS48581SignatureWithProofOfPossession
|
||||
p.PublicKeySignatureBLS48581 = BLS48581SignatureWithProofOfPossession{
|
||||
Signature: signature,
|
||||
PublicKey: prover.Public().([]byte),
|
||||
PublicKey: blsPublicKey,
|
||||
PopSignature: popSignature,
|
||||
}
|
||||
|
||||
|
||||
@ -418,6 +418,30 @@ func GlobalRequestFromBytes(
|
||||
)
|
||||
}
|
||||
|
||||
// ToRequestBytes serializes a ShardSplitOp to MessageRequest bytes using
|
||||
// protobuf
|
||||
func (op *ShardSplitOp) ToRequestBytes() ([]byte, error) {
|
||||
pb := op.ToProtobuf()
|
||||
req := &protobufs.MessageRequest{
|
||||
Request: &protobufs.MessageRequest_ShardSplit{
|
||||
ShardSplit: pb,
|
||||
},
|
||||
}
|
||||
return req.ToCanonicalBytes()
|
||||
}
|
||||
|
||||
// ToRequestBytes serializes a ShardMergeOp to MessageRequest bytes using
|
||||
// protobuf
|
||||
func (op *ShardMergeOp) ToRequestBytes() ([]byte, error) {
|
||||
pb := op.ToProtobuf()
|
||||
req := &protobufs.MessageRequest{
|
||||
Request: &protobufs.MessageRequest_ShardMerge{
|
||||
ShardMerge: pb,
|
||||
},
|
||||
}
|
||||
return req.ToCanonicalBytes()
|
||||
}
|
||||
|
||||
// ToBytes serializes a ProverUpdate to bytes using protobuf
|
||||
func (p *ProverUpdate) ToBytes() ([]byte, error) {
|
||||
pb := p.ToProtobuf()
|
||||
|
||||
254
node/execution/intrinsics/global/global_shard_merge.go
Normal file
254
node/execution/intrinsics/global/global_shard_merge.go
Normal file
@ -0,0 +1,254 @@
|
||||
package global
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"slices"
|
||||
|
||||
"github.com/iden3/go-iden3-crypto/poseidon"
|
||||
"github.com/pkg/errors"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/keys"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/store"
|
||||
)
|
||||
|
||||
// ShardMergeOp removes a set of sibling sub-shards, collapsing them back
// toward their shared parent prefix. The operation is authorized by a BLS
// signature from a prover that Verify checks against the prover registry.
type ShardMergeOp struct {
	// ShardAddresses are the sub-shard addresses to remove. Verify requires
	// 2-8 entries, each longer than 32 bytes and prefixed by ParentAddress.
	ShardAddresses [][]byte
	// ParentAddress is the 32-byte prefix shared by all merged shards.
	ParentAddress []byte
	// FrameNumber is the frame the authorizing signature is bound to.
	FrameNumber uint64
	// PublicKeySignatureBLS48581 carries the signer's 32-byte address and
	// the BLS signature over (frame number || parent address).
	PublicKeySignatureBLS48581 BLS48581AddressedSignature

	// Unexported dependencies injected via NewShardMergeOp /
	// ShardMergeFromProtobuf.
	hypergraph     hypergraph.Hypergraph
	keyManager     keys.KeyManager
	shardsStore    store.ShardsStore
	proverRegistry consensus.ProverRegistry
}
|
||||
|
||||
func NewShardMergeOp(
|
||||
shardAddresses [][]byte,
|
||||
parentAddress []byte,
|
||||
keyManager keys.KeyManager,
|
||||
shardsStore store.ShardsStore,
|
||||
proverRegistry consensus.ProverRegistry,
|
||||
) *ShardMergeOp {
|
||||
return &ShardMergeOp{
|
||||
ShardAddresses: shardAddresses,
|
||||
ParentAddress: parentAddress,
|
||||
keyManager: keyManager,
|
||||
shardsStore: shardsStore,
|
||||
proverRegistry: proverRegistry,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *ShardMergeOp) GetCost() (*big.Int, error) {
|
||||
return big.NewInt(0), nil
|
||||
}
|
||||
|
||||
func (op *ShardMergeOp) Verify(frameNumber uint64) (bool, error) {
|
||||
if op.proverRegistry == nil {
|
||||
return false, errors.New("prover registry not initialized")
|
||||
}
|
||||
|
||||
// Validate shard addresses
|
||||
if len(op.ShardAddresses) < 2 || len(op.ShardAddresses) > 8 {
|
||||
return false, errors.New("shard_addresses must have 2-8 entries")
|
||||
}
|
||||
|
||||
if len(op.ParentAddress) != 32 {
|
||||
return false, errors.New("parent_address must be 32 bytes")
|
||||
}
|
||||
|
||||
for _, addr := range op.ShardAddresses {
|
||||
if len(addr) <= 32 {
|
||||
return false, errors.New(
|
||||
"cannot merge base shards (must be > 32 bytes)",
|
||||
)
|
||||
}
|
||||
if !bytes.HasPrefix(addr, op.ParentAddress) {
|
||||
return false, errors.New(
|
||||
"all shard addresses must share the parent address prefix",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Look up the public key from the prover registry using the address
|
||||
address := op.PublicKeySignatureBLS48581.Address
|
||||
if len(address) != 32 {
|
||||
return false, errors.New("invalid address length")
|
||||
}
|
||||
|
||||
info, err := op.proverRegistry.GetProverInfo(address)
|
||||
if err != nil || info == nil {
|
||||
return false, errors.New("signer is not a registered prover")
|
||||
}
|
||||
|
||||
hasGlobal := false
|
||||
for _, alloc := range info.Allocations {
|
||||
if alloc.ConfirmationFilter == nil &&
|
||||
alloc.Status == consensus.ProverStatusActive {
|
||||
hasGlobal = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasGlobal {
|
||||
return false, errors.New("signer is not an active global prover")
|
||||
}
|
||||
|
||||
pubKey := info.PublicKey
|
||||
|
||||
// Verify BLS signature using the looked-up public key
|
||||
signedData := slices.Concat(
|
||||
big.NewInt(int64(op.FrameNumber)).FillBytes(make([]byte, 8)),
|
||||
op.ParentAddress,
|
||||
)
|
||||
|
||||
mergeDomainPreimage := slices.Concat(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
[]byte("SHARD_MERGE"),
|
||||
)
|
||||
mergeDomain, err := poseidon.HashBytes(mergeDomainPreimage)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
ok, err := op.keyManager.ValidateSignature(
|
||||
crypto.KeyTypeBLS48581G1,
|
||||
pubKey,
|
||||
signedData,
|
||||
op.PublicKeySignatureBLS48581.Signature,
|
||||
mergeDomain.Bytes(),
|
||||
)
|
||||
if err != nil || !ok {
|
||||
return false, errors.Wrap(
|
||||
errors.New("invalid BLS signature"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Verify all shards have fewer provers than minimum threshold
|
||||
globalProvers, err := op.proverRegistry.GetActiveProvers(nil)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify min provers")
|
||||
}
|
||||
minP := uint64(len(globalProvers)) * 2 / 3
|
||||
if minP > 6 {
|
||||
minP = 6
|
||||
}
|
||||
|
||||
for _, addr := range op.ShardAddresses {
|
||||
count, err := op.proverRegistry.GetProverCount(addr)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify prover count")
|
||||
}
|
||||
if uint64(count) >= minP {
|
||||
return false, errors.Errorf(
|
||||
"shard has %d provers (min threshold %d), merge not eligible",
|
||||
count, minP,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (op *ShardMergeOp) Materialize(
|
||||
frameNumber uint64,
|
||||
s state.State,
|
||||
) (state.State, error) {
|
||||
if op.shardsStore == nil {
|
||||
return nil, errors.New("shards store not initialized")
|
||||
}
|
||||
|
||||
// Remove each sub-shard address from the shards store
|
||||
for _, shardAddr := range op.ShardAddresses {
|
||||
// Extract L2 (first 32 bytes) and Path (remaining bytes as uint32s)
|
||||
shardKey := shardAddr[:32]
|
||||
path := make([]uint32, 0, len(shardAddr)-32)
|
||||
for _, b := range shardAddr[32:] {
|
||||
path = append(path, uint32(b))
|
||||
}
|
||||
|
||||
err := op.shardsStore.DeleteAppShard(nil, shardKey, path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize shard merge")
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (op *ShardMergeOp) Prove(frameNumber uint64) error {
|
||||
if op.keyManager == nil {
|
||||
return errors.New("key manager not initialized")
|
||||
}
|
||||
|
||||
signingKey, err := op.keyManager.GetSigningKey("q-prover-key")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
pubKey := signingKey.Public().([]byte)
|
||||
|
||||
// Derive the address from the public key
|
||||
addressBI, err := poseidon.HashBytes(pubKey)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
address := addressBI.FillBytes(make([]byte, 32))
|
||||
|
||||
signedData := slices.Concat(
|
||||
big.NewInt(int64(frameNumber)).FillBytes(make([]byte, 8)),
|
||||
op.ParentAddress,
|
||||
)
|
||||
|
||||
mergeDomainPreimage := slices.Concat(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
[]byte("SHARD_MERGE"),
|
||||
)
|
||||
mergeDomain, err := poseidon.HashBytes(mergeDomainPreimage)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
signature, err := signingKey.SignWithDomain(
|
||||
signedData,
|
||||
mergeDomain.Bytes(),
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "prove")
|
||||
}
|
||||
|
||||
op.FrameNumber = frameNumber
|
||||
op.PublicKeySignatureBLS48581 = BLS48581AddressedSignature{
|
||||
Address: address,
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetReadAddresses returns the addresses a shard merge reads; a merge reads
// nothing, so the set is always empty.
func (op *ShardMergeOp) GetReadAddresses(
	frameNumber uint64,
) ([][]byte, error) {
	return nil, nil
}
|
||||
|
||||
// GetWriteAddresses returns one 64-byte write-lock address per sub-shard:
// the global intrinsic address followed by the shard address.
func (op *ShardMergeOp) GetWriteAddresses(
	frameNumber uint64,
) ([][]byte, error) {
	// Shard merge writes to the shard addresses being removed
	addresses := make([][]byte, 0, len(op.ShardAddresses))
	for _, addr := range op.ShardAddresses {
		fullAddr := [64]byte{}
		copy(fullAddr[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
		// NOTE(review): sub-shard addresses are longer than 32 bytes (Verify
		// enforces > 32), so this copy truncates addr to its first 32 bytes.
		// All merged siblings share that 32-byte prefix and therefore map to
		// the same write address — confirm this parent-level lock
		// granularity is intentional.
		copy(fullAddr[32:], addr)
		addresses = append(addresses, fullAddr[:])
	}
	return addresses, nil
}

var _ intrinsics.IntrinsicOperation = (*ShardMergeOp)(nil)
|
||||
543
node/execution/intrinsics/global/global_shard_merge_test.go
Normal file
543
node/execution/intrinsics/global/global_shard_merge_test.go
Normal file
@ -0,0 +1,543 @@
|
||||
package global_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/iden3/go-iden3-crypto/poseidon"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/mocks"
|
||||
)
|
||||
|
||||
// TestShardMergeOp_Prove checks that Prove signs (frame number || parent
// address) under the SHARD_MERGE domain with the "q-prover-key" and stores
// the Poseidon-derived address, signature, and frame number on the op.
func TestShardMergeOp_Prove(t *testing.T) {
	mockKeyManager := new(mocks.MockKeyManager)
	mockSigner := new(mocks.MockBLSSigner)

	parentAddress := make([]byte, 32)
	for i := range parentAddress {
		parentAddress[i] = byte(i % 256)
	}
	shardAddresses := [][]byte{
		append(slices.Clone(parentAddress), 0x00),
		append(slices.Clone(parentAddress), 0x01),
	}
	frameNumber := uint64(12345)
	pubKey := make([]byte, 585)
	for i := range pubKey {
		pubKey[i] = byte(i % 256)
	}

	// Derive expected 32-byte address from pubKey
	addressBI, err := poseidon.HashBytes(pubKey)
	require.NoError(t, err)
	expectedAddress := addressBI.FillBytes(make([]byte, 32))

	// Recompute the SHARD_MERGE signing domain the op is expected to use.
	mergeDomainPreimage := slices.Concat(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		[]byte("SHARD_MERGE"),
	)
	mergeDomain, err := poseidon.HashBytes(mergeDomainPreimage)
	require.NoError(t, err)

	expectedMessage := slices.Concat(
		big.NewInt(int64(frameNumber)).FillBytes(make([]byte, 8)),
		parentAddress,
	)

	mockSigner.On("Public").Return(pubKey)
	mockSigner.On("SignWithDomain", expectedMessage, mergeDomain.Bytes()).
		Return([]byte("signature"), nil)

	mockKeyManager.On("GetSigningKey", "q-prover-key").Return(mockSigner, nil)

	op := global.NewShardMergeOp(
		shardAddresses,
		parentAddress,
		mockKeyManager,
		nil,
		nil,
	)

	err = op.Prove(frameNumber)
	require.NoError(t, err)

	assert.Equal(t, expectedAddress, op.PublicKeySignatureBLS48581.Address)
	assert.Equal(t, []byte("signature"), op.PublicKeySignatureBLS48581.Signature)
	assert.Equal(t, frameNumber, op.FrameNumber)

	mockSigner.AssertExpectations(t)
	mockKeyManager.AssertExpectations(t)
}
|
||||
|
||||
// TestShardMergeOp_Verify exercises every validation branch of
// ShardMergeOp.Verify: structural address checks, prover-registry lookups,
// BLS signature validation, and the prover-count merge-eligibility
// threshold.
func TestShardMergeOp_Verify(t *testing.T) {
	t.Run("prover registry required", func(t *testing.T) {
		parentAddress := make([]byte, 32)
		op := global.NewShardMergeOp(
			[][]byte{
				append(slices.Clone(parentAddress), 0x00),
				append(slices.Clone(parentAddress), 0x01),
			},
			parentAddress,
			nil,
			nil,
			nil, // no registry
		)

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "prover registry not initialized")
	})

	t.Run("too few shard addresses", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		parentAddress := make([]byte, 32)
		op := global.NewShardMergeOp(
			[][]byte{append(slices.Clone(parentAddress), 0x00)}, // only 1
			parentAddress,
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "2-8")
	})

	t.Run("parent address wrong length", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		op := global.NewShardMergeOp(
			[][]byte{make([]byte, 34), make([]byte, 34)},
			make([]byte, 31), // wrong length
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "32 bytes")
	})

	t.Run("base shard cannot be merged", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		parentAddress := make([]byte, 32)
		op := global.NewShardMergeOp(
			[][]byte{
				make([]byte, 32), // exactly 32 bytes = base shard
				append(slices.Clone(parentAddress), 0x01),
			},
			parentAddress,
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "base shards")
	})

	t.Run("shard does not share parent prefix", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		parentAddress := make([]byte, 32)
		badShard := make([]byte, 33)
		badShard[0] = 0xFF
		op := global.NewShardMergeOp(
			[][]byte{
				append(slices.Clone(parentAddress), 0x00),
				badShard,
			},
			parentAddress,
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "parent address prefix")
	})

	t.Run("invalid address length", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		parentAddress := make([]byte, 32)
		op := global.NewShardMergeOp(
			[][]byte{
				append(slices.Clone(parentAddress), 0x00),
				append(slices.Clone(parentAddress), 0x01),
			},
			parentAddress,
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 585), // wrong length
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "invalid address length")
	})

	t.Run("signer not a registered prover", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)

		parentAddress := make([]byte, 32)
		shardAddresses := [][]byte{
			append(slices.Clone(parentAddress), 0x00),
			append(slices.Clone(parentAddress), 0x01),
		}
		address := make([]byte, 32)
		address[0] = 0x42

		// Registry returns no info for this address.
		mockRegistry.On("GetProverInfo", address).Return(nil, nil)

		op := global.NewShardMergeOp(
			shardAddresses, parentAddress, nil, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "not a registered prover")
	})

	t.Run("signer not a global prover", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)

		parentAddress := make([]byte, 32)
		shardAddresses := [][]byte{
			append(slices.Clone(parentAddress), 0x00),
			append(slices.Clone(parentAddress), 0x01),
		}
		address := make([]byte, 32)
		address[0] = 0x42

		// Allocation has a non-nil confirmation filter, i.e. an app shard,
		// so the signer has no active global allocation.
		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: []byte("some-app-shard"),
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)

		op := global.NewShardMergeOp(
			shardAddresses, parentAddress, nil, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "not an active global prover")
	})

	t.Run("invalid signature fails", func(t *testing.T) {
		mockKeyManager := new(mocks.MockKeyManager)
		mockRegistry := new(mocks.MockProverRegistry)

		parentAddress := make([]byte, 32)
		shardAddresses := [][]byte{
			append(slices.Clone(parentAddress), 0x00),
			append(slices.Clone(parentAddress), 0x01),
		}
		pubKey := make([]byte, 585)
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			PublicKey: pubKey,
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: nil,
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)

		mergeDomainPreimage := slices.Concat(
			intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
			[]byte("SHARD_MERGE"),
		)
		mergeDomain, err := poseidon.HashBytes(mergeDomainPreimage)
		require.NoError(t, err)

		// Signature validation reports failure for this signature.
		mockKeyManager.On("ValidateSignature",
			crypto.KeyTypeBLS48581G1,
			pubKey,
			mock.Anything,
			[]byte("bad-sig"),
			mergeDomain.Bytes(),
		).Return(false, nil)

		op := global.NewShardMergeOp(
			shardAddresses, parentAddress, mockKeyManager, nil, mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: []byte("bad-sig"),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "invalid BLS signature")
	})

	t.Run("shard prover count above merge threshold", func(t *testing.T) {
		mockKeyManager := new(mocks.MockKeyManager)
		mockRegistry := new(mocks.MockProverRegistry)

		parentAddress := make([]byte, 32)
		shardAddresses := [][]byte{
			append(slices.Clone(parentAddress), 0x00),
			append(slices.Clone(parentAddress), 0x01),
		}
		pubKey := make([]byte, 585)
		for i := range pubKey {
			pubKey[i] = byte(i % 256)
		}
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			PublicKey: pubKey,
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: nil,
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)
		setupMergeSignatureValidation(mockKeyManager, pubKey)
		// 10 global provers -> minP = 10*2/3 = 6
		globalProvers := make([]*consensus.ProverInfo, 10)
		mockRegistry.On("GetActiveProvers", []byte(nil)).Return(globalProvers, nil)
		// First shard has 10 provers (>= minP=6), merge not eligible
		mockRegistry.On("GetProverCount", shardAddresses[0]).Return(10, nil)

		op := global.NewShardMergeOp(
			shardAddresses, parentAddress, mockKeyManager, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "merge not eligible")
	})

	t.Run("valid merge with eligibility checks", func(t *testing.T) {
		mockKeyManager := new(mocks.MockKeyManager)
		mockRegistry := new(mocks.MockProverRegistry)

		parentAddress := make([]byte, 32)
		for i := range parentAddress {
			parentAddress[i] = byte(i % 256)
		}
		shardAddresses := [][]byte{
			append(slices.Clone(parentAddress), 0x00),
			append(slices.Clone(parentAddress), 0x01),
		}
		pubKey := make([]byte, 585)
		for i := range pubKey {
			pubKey[i] = byte(i % 256)
		}
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			PublicKey: pubKey,
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: nil,
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)
		setupMergeSignatureValidation(mockKeyManager, pubKey)
		// 10 global provers -> minP = 10*2/3 = 6
		globalProvers := make([]*consensus.ProverInfo, 10)
		mockRegistry.On("GetActiveProvers", []byte(nil)).Return(globalProvers, nil)
		// Both shards have 2 provers (< minP=6), merge eligible
		mockRegistry.On("GetProverCount", shardAddresses[0]).Return(2, nil)
		mockRegistry.On("GetProverCount", shardAddresses[1]).Return(2, nil)

		op := global.NewShardMergeOp(
			shardAddresses, parentAddress, mockKeyManager, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.NoError(t, err)
		assert.True(t, valid)
	})
}
|
||||
|
||||
// TestShardMergeOp_Materialize checks that Materialize deletes every
// sub-shard from the shards store (splitting each address into a 32-byte
// shard key and a per-byte uint32 path) and errors when no store is set.
func TestShardMergeOp_Materialize(t *testing.T) {
	t.Run("deletes sub-shards", func(t *testing.T) {
		mockShardsStore := new(mocks.MockShardsStore)

		parentAddress := make([]byte, 32)
		for i := range parentAddress {
			parentAddress[i] = byte(i % 256)
		}
		shardAddresses := [][]byte{
			append(slices.Clone(parentAddress), 0x00),
			append(slices.Clone(parentAddress), 0x01),
		}

		// Expect one DeleteAppShard call per sub-shard with the derived
		// key/path pair.
		for _, addr := range shardAddresses {
			shardKey := slices.Clone(addr[:32])
			path := make([]uint32, 0, len(addr)-32)
			for _, b := range addr[32:] {
				path = append(path, uint32(b))
			}
			mockShardsStore.On("DeleteAppShard",
				mock.Anything, // txn
				shardKey,
				path,
			).Return(nil).Once()
		}

		op := global.NewShardMergeOp(
			shardAddresses,
			parentAddress,
			nil,
			mockShardsStore,
			nil,
		)

		newState, err := op.Materialize(100, nil)
		require.NoError(t, err)
		assert.Nil(t, newState)

		mockShardsStore.AssertExpectations(t)
	})

	t.Run("shardsStore nil returns error", func(t *testing.T) {
		op := global.NewShardMergeOp(
			[][]byte{make([]byte, 33), make([]byte, 33)},
			make([]byte, 32),
			nil,
			nil, // no store
			nil,
		)

		_, err := op.Materialize(100, nil)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "shards store not initialized")
	})
}
|
||||
|
||||
// TestShardMergeOp_GetCost checks that a shard merge costs zero.
func TestShardMergeOp_GetCost(t *testing.T) {
	op := global.NewShardMergeOp(nil, nil, nil, nil, nil)
	cost, err := op.GetCost()
	require.NoError(t, err)
	assert.Equal(t, int64(0), cost.Int64())
}
|
||||
|
||||
// TestShardMergeOp_GetWriteAddresses checks that one 64-byte write address
// is produced per sub-shard, each prefixed with the global intrinsic
// address.
func TestShardMergeOp_GetWriteAddresses(t *testing.T) {
	parentAddress := make([]byte, 32)
	shardAddresses := [][]byte{
		append(slices.Clone(parentAddress), 0x00),
		append(slices.Clone(parentAddress), 0x01),
	}

	op := global.NewShardMergeOp(shardAddresses, parentAddress, nil, nil, nil)

	addrs, err := op.GetWriteAddresses(0)
	require.NoError(t, err)
	require.Len(t, addrs, 2)

	for _, addr := range addrs {
		assert.Len(t, addr, 64)
		assert.Equal(t, intrinsics.GLOBAL_INTRINSIC_ADDRESS[:], addr[:32])
	}
}
|
||||
|
||||
// TestShardMergeOp_GetReadAddresses checks that a merge reads no addresses.
func TestShardMergeOp_GetReadAddresses(t *testing.T) {
	op := global.NewShardMergeOp(nil, nil, nil, nil, nil)
	addrs, err := op.GetReadAddresses(0)
	require.NoError(t, err)
	assert.Nil(t, addrs)
}
|
||||
|
||||
// TestShardMergeOp_ToRequestBytes checks that a populated op serializes to
// non-empty MessageRequest bytes without error.
func TestShardMergeOp_ToRequestBytes(t *testing.T) {
	parentAddress := make([]byte, 32)
	for i := range parentAddress {
		parentAddress[i] = byte(i % 256)
	}
	shardAddresses := [][]byte{
		append(slices.Clone(parentAddress), 0x00),
		append(slices.Clone(parentAddress), 0x01),
	}

	op := global.NewShardMergeOp(shardAddresses, parentAddress, nil, nil, nil)
	op.FrameNumber = 12345
	op.PublicKeySignatureBLS48581 = MockAddressedSignature()

	data, err := op.ToRequestBytes()
	require.NoError(t, err)
	require.NotEmpty(t, data)
}
|
||||
|
||||
// setupMergeSignatureValidation sets up mock expectations for BLS signature
// validation on a merge operation: any message/signature/domain for the
// given public key is accepted as valid.
func setupMergeSignatureValidation(
	mockKeyManager *mocks.MockKeyManager,
	pubKey []byte,
) {
	mockKeyManager.On("ValidateSignature",
		crypto.KeyTypeBLS48581G1,
		pubKey,
		mock.Anything,
		mock.Anything,
		mock.Anything,
	).Return(true, nil)
}
|
||||
249
node/execution/intrinsics/global/global_shard_split.go
Normal file
249
node/execution/intrinsics/global/global_shard_split.go
Normal file
@ -0,0 +1,249 @@
|
||||
package global
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"slices"
|
||||
|
||||
"github.com/iden3/go-iden3-crypto/poseidon"
|
||||
"github.com/pkg/errors"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/state"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/keys"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/store"
|
||||
)
|
||||
|
||||
// maxProversThreshold caps prover counts for split decisions.
// NOTE(review): its use is outside this view — confirm against
// ShardSplitOp.Verify.
const maxProversThreshold = 32

// ShardSplitOp proposes splitting a shard into 2-8 sub-shards. The
// operation is authorized by a BLS signature from a prover that Verify
// checks against the prover registry.
type ShardSplitOp struct {
	// ShardAddress is the parent shard to split; Verify requires 32-63
	// bytes.
	ShardAddress []byte
	// ProposedShards are the resulting sub-shard addresses, each 1-2 bytes
	// longer than ShardAddress and sharing its prefix.
	ProposedShards [][]byte
	// FrameNumber is the frame the authorizing signature is bound to.
	FrameNumber uint64
	// PublicKeySignatureBLS48581 carries the signer's 32-byte address and
	// BLS signature.
	PublicKeySignatureBLS48581 BLS48581AddressedSignature

	// Unexported dependencies injected via NewShardSplitOp /
	// ShardSplitFromProtobuf.
	hypergraph     hypergraph.Hypergraph
	keyManager     keys.KeyManager
	shardsStore    store.ShardsStore
	proverRegistry consensus.ProverRegistry
}
|
||||
|
||||
func NewShardSplitOp(
|
||||
shardAddress []byte,
|
||||
proposedShards [][]byte,
|
||||
keyManager keys.KeyManager,
|
||||
shardsStore store.ShardsStore,
|
||||
proverRegistry consensus.ProverRegistry,
|
||||
) *ShardSplitOp {
|
||||
return &ShardSplitOp{
|
||||
ShardAddress: shardAddress,
|
||||
ProposedShards: proposedShards,
|
||||
keyManager: keyManager,
|
||||
shardsStore: shardsStore,
|
||||
proverRegistry: proverRegistry,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *ShardSplitOp) GetCost() (*big.Int, error) {
|
||||
return big.NewInt(0), nil
|
||||
}
|
||||
|
||||
// Verify checks that a shard-split request is structurally valid, signed by
// an active global prover, and justified by the shard's prover count.
//
// NOTE(review): the frameNumber parameter is not used — the signature is
// validated against op.FrameNumber instead. Confirm whether callers expect
// the two to be cross-checked.
func (op *ShardSplitOp) Verify(frameNumber uint64) (bool, error) {
	if op.proverRegistry == nil {
		return false, errors.New("prover registry not initialized")
	}

	// Validate shard address length
	if len(op.ShardAddress) < 32 || len(op.ShardAddress) > 63 {
		return false, errors.New("shard_address must be 32-63 bytes")
	}

	// Validate proposed shards
	if len(op.ProposedShards) < 2 || len(op.ProposedShards) > 8 {
		return false, errors.New("proposed_shards must have 2-8 entries")
	}

	// Each child must extend the parent by one or two bytes and carry the
	// parent address as its prefix.
	for _, shard := range op.ProposedShards {
		if len(shard) != len(op.ShardAddress)+1 &&
			len(shard) != len(op.ShardAddress)+2 {
			return false, errors.Errorf(
				"proposed shard length %d invalid for parent length %d",
				len(shard), len(op.ShardAddress),
			)
		}
		if !bytes.HasPrefix(shard, op.ShardAddress) {
			return false, errors.New("proposed shard must share parent prefix")
		}
	}

	// Look up the public key from the prover registry using the address
	address := op.PublicKeySignatureBLS48581.Address
	if len(address) != 32 {
		return false, errors.New("invalid address length")
	}

	info, err := op.proverRegistry.GetProverInfo(address)
	if err != nil || info == nil {
		return false, errors.New("signer is not a registered prover")
	}

	// The signer must hold an active allocation with no confirmation
	// filter, i.e. an active *global* allocation.
	hasGlobal := false
	for _, alloc := range info.Allocations {
		if alloc.ConfirmationFilter == nil &&
			alloc.Status == consensus.ProverStatusActive {
			hasGlobal = true
			break
		}
	}
	if !hasGlobal {
		return false, errors.New("signer is not an active global prover")
	}

	pubKey := info.PublicKey

	// Verify BLS signature using the looked-up public key. The message is
	// op.FrameNumber (8-byte big-endian) followed by the parent address.
	signedData := slices.Concat(
		big.NewInt(int64(op.FrameNumber)).FillBytes(make([]byte, 8)),
		op.ShardAddress,
	)

	// Domain separation: Poseidon hash of the global intrinsic address
	// concatenated with the "SHARD_SPLIT" tag (mirrors Prove).
	splitDomainPreimage := slices.Concat(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		[]byte("SHARD_SPLIT"),
	)
	splitDomain, err := poseidon.HashBytes(splitDomainPreimage)
	if err != nil {
		return false, errors.Wrap(err, "verify")
	}

	ok, err := op.keyManager.ValidateSignature(
		crypto.KeyTypeBLS48581G1,
		pubKey,
		signedData,
		op.PublicKeySignatureBLS48581.Signature,
		splitDomain.Bytes(),
	)
	if err != nil || !ok {
		return false, errors.Wrap(
			errors.New("invalid BLS signature"),
			"verify",
		)
	}

	// Verify shard has enough provers to warrant split (> maxProvers)
	count, err := op.proverRegistry.GetProverCount(op.ShardAddress)
	if err != nil {
		return false, errors.Wrap(err, "verify prover count")
	}
	if count <= maxProversThreshold {
		return false, errors.Errorf(
			"shard has %d provers, split requires > %d",
			count, maxProversThreshold,
		)
	}

	return true, nil
}
|
||||
|
||||
func (op *ShardSplitOp) Materialize(
|
||||
frameNumber uint64,
|
||||
s state.State,
|
||||
) (state.State, error) {
|
||||
if op.shardsStore == nil {
|
||||
return nil, errors.New("shards store not initialized")
|
||||
}
|
||||
|
||||
// Register each new sub-shard address in the shards store
|
||||
for _, proposedShard := range op.ProposedShards {
|
||||
// Extract L2 (first 32 bytes) and Path (remaining bytes as uint32s)
|
||||
l2 := proposedShard[:32]
|
||||
path := make([]uint32, 0, len(proposedShard)-32)
|
||||
for _, b := range proposedShard[32:] {
|
||||
path = append(path, uint32(b))
|
||||
}
|
||||
|
||||
err := op.shardsStore.PutAppShard(nil, store.ShardInfo{
|
||||
L2: slices.Clone(l2),
|
||||
Path: path,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize shard split")
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Prove signs the split request with the local "q-prover-key" and populates
// the op's FrameNumber and PublicKeySignatureBLS48581.
//
// The signed message is frameNumber (8-byte big-endian) || ShardAddress,
// under the Poseidon-derived SHARD_SPLIT domain — the exact construction
// that Verify recomputes.
func (op *ShardSplitOp) Prove(frameNumber uint64) error {
	if op.keyManager == nil {
		return errors.New("key manager not initialized")
	}

	signingKey, err := op.keyManager.GetSigningKey("q-prover-key")
	if err != nil {
		return errors.Wrap(err, "prove")
	}

	// NOTE(review): this type assertion panics if Public() does not return
	// a []byte — confirm all signer implementations return raw key bytes.
	pubKey := signingKey.Public().([]byte)

	// Derive the address from the public key
	addressBI, err := poseidon.HashBytes(pubKey)
	if err != nil {
		return errors.Wrap(err, "prove")
	}
	address := addressBI.FillBytes(make([]byte, 32))

	// Message: 8-byte big-endian frame number followed by the parent shard.
	signedData := slices.Concat(
		big.NewInt(int64(frameNumber)).FillBytes(make([]byte, 8)),
		op.ShardAddress,
	)

	// Domain separation tag, identical to the one Verify derives.
	splitDomainPreimage := slices.Concat(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		[]byte("SHARD_SPLIT"),
	)
	splitDomain, err := poseidon.HashBytes(splitDomainPreimage)
	if err != nil {
		return errors.Wrap(err, "prove")
	}

	signature, err := signingKey.SignWithDomain(
		signedData,
		splitDomain.Bytes(),
	)
	if err != nil {
		return errors.Wrap(err, "prove")
	}

	// Persist the proven frame and the addressed signature on the op.
	op.FrameNumber = frameNumber
	op.PublicKeySignatureBLS48581 = BLS48581AddressedSignature{
		Address:   address,
		Signature: signature,
	}

	return nil
}
|
||||
|
||||
func (op *ShardSplitOp) GetReadAddresses(
|
||||
frameNumber uint64,
|
||||
) ([][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetWriteAddresses reports one 64-byte write address per proposed shard:
// the global intrinsic address in the first 32 bytes, the proposed shard
// copied into the remaining 32.
func (op *ShardSplitOp) GetWriteAddresses(
	frameNumber uint64,
) ([][]byte, error) {
	// Shard split writes to shard addresses
	addresses := make([][]byte, 0, len(op.ProposedShards))
	for _, shard := range op.ProposedShards {
		addr := [64]byte{}
		copy(addr[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
		// NOTE(review): proposed shards are 33-65 bytes (see Verify), so
		// this copy truncates anything past the first 32 bytes — confirm
		// this truncated keying is intended.
		copy(addr[32:], shard)
		addresses = append(addresses, addr[:])
	}
	return addresses, nil
}
|
||||
|
||||
var _ intrinsics.IntrinsicOperation = (*ShardSplitOp)(nil)
|
||||
527
node/execution/intrinsics/global/global_shard_split_test.go
Normal file
527
node/execution/intrinsics/global/global_shard_split_test.go
Normal file
@ -0,0 +1,527 @@
|
||||
package global_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/iden3/go-iden3-crypto/poseidon"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/consensus"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/execution/intrinsics"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/mocks"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/store"
|
||||
)
|
||||
|
||||
// TestShardSplitOp_Prove verifies that Prove derives the signer address via
// Poseidon from the prover public key, signs frame||shard under the
// SHARD_SPLIT domain, and stores frame number, address, and signature on
// the op.
func TestShardSplitOp_Prove(t *testing.T) {
	mockKeyManager := new(mocks.MockKeyManager)
	mockSigner := new(mocks.MockBLSSigner)

	// 33-byte parent with a deterministic byte pattern.
	shardAddress := make([]byte, 33)
	for i := range shardAddress {
		shardAddress[i] = byte(i % 256)
	}
	proposedShards := [][]byte{
		append(slices.Clone(shardAddress), 0x00),
		append(slices.Clone(shardAddress), 0x01),
	}
	frameNumber := uint64(12345)
	pubKey := make([]byte, 585)
	for i := range pubKey {
		pubKey[i] = byte(i % 256)
	}

	// Derive expected 32-byte address from pubKey
	addressBI, err := poseidon.HashBytes(pubKey)
	require.NoError(t, err)
	expectedAddress := addressBI.FillBytes(make([]byte, 32))

	// Recompute the SHARD_SPLIT signing domain exactly as Prove does.
	splitDomainPreimage := slices.Concat(
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		[]byte("SHARD_SPLIT"),
	)
	splitDomain, err := poseidon.HashBytes(splitDomainPreimage)
	require.NoError(t, err)

	// Expected message: 8-byte big-endian frame number || parent address.
	expectedMessage := slices.Concat(
		big.NewInt(int64(frameNumber)).FillBytes(make([]byte, 8)),
		shardAddress,
	)

	mockSigner.On("Public").Return(pubKey)
	mockSigner.On("SignWithDomain", expectedMessage, splitDomain.Bytes()).
		Return([]byte("signature"), nil)

	mockKeyManager.On("GetSigningKey", "q-prover-key").Return(mockSigner, nil)

	op := global.NewShardSplitOp(
		shardAddress,
		proposedShards,
		mockKeyManager,
		nil,
		nil,
	)

	err = op.Prove(frameNumber)
	require.NoError(t, err)

	assert.Equal(t, expectedAddress, op.PublicKeySignatureBLS48581.Address)
	assert.Equal(t, []byte("signature"), op.PublicKeySignatureBLS48581.Signature)
	assert.Equal(t, frameNumber, op.FrameNumber)

	mockSigner.AssertExpectations(t)
	mockKeyManager.AssertExpectations(t)
}
|
||||
|
||||
// TestShardSplitOp_Verify exercises every rejection path of Verify in order
// (missing registry, structural validation, registry lookup, allocation
// check, signature check, prover-count threshold) plus the happy path.
func TestShardSplitOp_Verify(t *testing.T) {
	// A nil prover registry must fail before any other validation runs.
	t.Run("prover registry required", func(t *testing.T) {
		op := global.NewShardSplitOp(
			make([]byte, 33),
			[][]byte{make([]byte, 34), make([]byte, 34)},
			nil,
			nil,
			nil, // no registry
		)

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "prover registry not initialized")
	})

	// Parent address below the 32-byte minimum is rejected.
	t.Run("shard address too short", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		op := global.NewShardSplitOp(
			make([]byte, 31), // too short
			[][]byte{make([]byte, 33), make([]byte, 33)},
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "32-63 bytes")
	})

	// Parent address above the 63-byte maximum is rejected.
	t.Run("shard address too long", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		op := global.NewShardSplitOp(
			make([]byte, 64), // too long
			[][]byte{make([]byte, 65), make([]byte, 65)},
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "32-63 bytes")
	})

	// At least two children are required to constitute a split.
	t.Run("too few proposed shards", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		shardAddress := make([]byte, 33)
		op := global.NewShardSplitOp(
			shardAddress,
			[][]byte{append(slices.Clone(shardAddress), 0x00)}, // only 1
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "2-8")
	})

	// A child that does not carry the parent as a prefix is rejected.
	t.Run("proposed shard wrong prefix", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		shardAddress := make([]byte, 33)
		badShard := make([]byte, 34)
		badShard[0] = 0xFF // does not match parent prefix
		op := global.NewShardSplitOp(
			shardAddress,
			[][]byte{
				append(slices.Clone(shardAddress), 0x00),
				badShard,
			},
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 32),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "prefix")
	})

	// Signer address must be exactly 32 bytes.
	t.Run("invalid address length", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)
		shardAddress := make([]byte, 33)
		op := global.NewShardSplitOp(
			shardAddress,
			[][]byte{
				append(slices.Clone(shardAddress), 0x00),
				append(slices.Clone(shardAddress), 0x01),
			},
			nil,
			nil,
			mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address: make([]byte, 585), // wrong length
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "invalid address length")
	})

	// A nil registry lookup result means the signer is unknown.
	t.Run("signer not a registered prover", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)

		shardAddress := make([]byte, 33)
		proposedShards := [][]byte{
			append(slices.Clone(shardAddress), 0x00),
			append(slices.Clone(shardAddress), 0x01),
		}
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(nil, nil)

		op := global.NewShardSplitOp(
			shardAddress, proposedShards, nil, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "not a registered prover")
	})

	// An allocation with a confirmation filter is app-shard scoped, not
	// global, so the signer is rejected.
	t.Run("signer not a global prover", func(t *testing.T) {
		mockRegistry := new(mocks.MockProverRegistry)

		shardAddress := make([]byte, 33)
		proposedShards := [][]byte{
			append(slices.Clone(shardAddress), 0x00),
			append(slices.Clone(shardAddress), 0x01),
		}
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: []byte("some-app-shard"),
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)

		op := global.NewShardSplitOp(
			shardAddress, proposedShards, nil, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "not an active global prover")
	})

	// The key manager reporting ok=false fails verification.
	t.Run("invalid signature fails", func(t *testing.T) {
		mockKeyManager := new(mocks.MockKeyManager)
		mockRegistry := new(mocks.MockProverRegistry)

		shardAddress := make([]byte, 33)
		proposedShards := [][]byte{
			append(slices.Clone(shardAddress), 0x00),
			append(slices.Clone(shardAddress), 0x01),
		}
		pubKey := make([]byte, 585)
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			PublicKey: pubKey,
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: nil,
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)

		splitDomainPreimage := slices.Concat(
			intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
			[]byte("SHARD_SPLIT"),
		)
		splitDomain, err := poseidon.HashBytes(splitDomainPreimage)
		require.NoError(t, err)

		mockKeyManager.On("ValidateSignature",
			crypto.KeyTypeBLS48581G1,
			pubKey,
			mock.Anything,
			[]byte("bad-sig"),
			splitDomain.Bytes(),
		).Return(false, nil)

		op := global.NewShardSplitOp(
			shardAddress, proposedShards, mockKeyManager, nil, mockRegistry,
		)
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: []byte("bad-sig"),
		}

		valid, err := op.Verify(0)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "invalid BLS signature")
	})

	// Prover count at or below maxProversThreshold (32) blocks the split.
	t.Run("shard prover count below split threshold", func(t *testing.T) {
		mockKeyManager := new(mocks.MockKeyManager)
		mockRegistry := new(mocks.MockProverRegistry)

		shardAddress := make([]byte, 33)
		proposedShards := [][]byte{
			append(slices.Clone(shardAddress), 0x00),
			append(slices.Clone(shardAddress), 0x01),
		}
		pubKey := make([]byte, 585)
		for i := range pubKey {
			pubKey[i] = byte(i % 256)
		}
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			PublicKey: pubKey,
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: nil,
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)
		setupSplitSignatureValidation(mockKeyManager, pubKey)
		mockRegistry.On("GetProverCount", shardAddress).Return(30, nil)

		op := global.NewShardSplitOp(
			shardAddress, proposedShards, mockKeyManager, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.Error(t, err)
		assert.False(t, valid)
		assert.Contains(t, err.Error(), "split requires")
	})

	// Happy path: registered active global prover, valid signature, and a
	// prover count above the threshold.
	t.Run("valid split with eligibility checks", func(t *testing.T) {
		mockKeyManager := new(mocks.MockKeyManager)
		mockRegistry := new(mocks.MockProverRegistry)

		shardAddress := make([]byte, 33)
		for i := range shardAddress {
			shardAddress[i] = byte(i % 256)
		}
		proposedShards := [][]byte{
			append(slices.Clone(shardAddress), 0x00),
			append(slices.Clone(shardAddress), 0x01),
		}
		pubKey := make([]byte, 585)
		for i := range pubKey {
			pubKey[i] = byte(i % 256)
		}
		address := make([]byte, 32)
		address[0] = 0x42

		mockRegistry.On("GetProverInfo", address).Return(&consensus.ProverInfo{
			PublicKey: pubKey,
			Allocations: []consensus.ProverAllocationInfo{
				{
					ConfirmationFilter: nil,
					Status:             consensus.ProverStatusActive,
				},
			},
		}, nil)
		setupSplitSignatureValidation(mockKeyManager, pubKey)
		mockRegistry.On("GetProverCount", shardAddress).Return(35, nil)

		op := global.NewShardSplitOp(
			shardAddress, proposedShards, mockKeyManager, nil, mockRegistry,
		)
		op.FrameNumber = 100
		op.PublicKeySignatureBLS48581 = global.BLS48581AddressedSignature{
			Address:   address,
			Signature: make([]byte, 74),
		}

		valid, err := op.Verify(100)
		require.NoError(t, err)
		assert.True(t, valid)
	})
}
|
||||
|
||||
// TestShardSplitOp_Materialize covers sub-shard registration in the shards
// store and the guard for a missing store.
func TestShardSplitOp_Materialize(t *testing.T) {
	t.Run("registers proposed shards", func(t *testing.T) {
		mockShardsStore := new(mocks.MockShardsStore)

		// 33-byte shard address: 32 bytes L2 + 1 byte path
		shardAddress := make([]byte, 33)
		for i := range shardAddress {
			shardAddress[i] = byte(i % 256)
		}
		proposedShards := [][]byte{
			append(slices.Clone(shardAddress), 0x00),
			append(slices.Clone(shardAddress), 0x01),
		}

		// Expect exactly one PutAppShard call per proposed shard, with the
		// shard decomposed into L2 (first 32 bytes) and Path (remaining
		// bytes widened to uint32s).
		for _, ps := range proposedShards {
			l2 := slices.Clone(ps[:32])
			path := make([]uint32, 0, len(ps)-32)
			for _, b := range ps[32:] {
				path = append(path, uint32(b))
			}
			mockShardsStore.On("PutAppShard",
				mock.Anything, // txn
				store.ShardInfo{L2: l2, Path: path},
			).Return(nil).Once()
		}

		op := global.NewShardSplitOp(
			shardAddress,
			proposedShards,
			nil,
			mockShardsStore,
			nil,
		)

		newState, err := op.Materialize(100, nil)
		require.NoError(t, err)
		assert.Nil(t, newState) // state passthrough (nil in)

		mockShardsStore.AssertExpectations(t)
	})

	t.Run("shardsStore nil returns error", func(t *testing.T) {
		op := global.NewShardSplitOp(
			make([]byte, 33),
			[][]byte{make([]byte, 34), make([]byte, 34)},
			nil,
			nil, // no store
			nil,
		)

		_, err := op.Materialize(100, nil)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "shards store not initialized")
	})
}
|
||||
|
||||
func TestShardSplitOp_GetCost(t *testing.T) {
|
||||
op := global.NewShardSplitOp(nil, nil, nil, nil, nil)
|
||||
cost, err := op.GetCost()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), cost.Int64())
|
||||
}
|
||||
|
||||
// TestShardSplitOp_GetWriteAddresses checks that each proposed shard yields
// one 64-byte write address prefixed with the global intrinsic address.
func TestShardSplitOp_GetWriteAddresses(t *testing.T) {
	shardAddress := make([]byte, 33)
	proposedShards := [][]byte{
		append(slices.Clone(shardAddress), 0x00),
		append(slices.Clone(shardAddress), 0x01),
	}

	op := global.NewShardSplitOp(shardAddress, proposedShards, nil, nil, nil)

	addrs, err := op.GetWriteAddresses(0)
	require.NoError(t, err)
	require.Len(t, addrs, 2)

	// Every address is 64 bytes with the global intrinsic prefix.
	for _, addr := range addrs {
		assert.Len(t, addr, 64)
		assert.Equal(t, intrinsics.GLOBAL_INTRINSIC_ADDRESS[:], addr[:32])
	}
}
|
||||
|
||||
func TestShardSplitOp_GetReadAddresses(t *testing.T) {
|
||||
op := global.NewShardSplitOp(nil, nil, nil, nil, nil)
|
||||
addrs, err := op.GetReadAddresses(0)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, addrs)
|
||||
}
|
||||
|
||||
// TestShardSplitOp_ToRequestBytes checks that a fully populated split op
// serializes to a non-empty request payload.
func TestShardSplitOp_ToRequestBytes(t *testing.T) {
	shardAddress := make([]byte, 33)
	for i := range shardAddress {
		shardAddress[i] = byte(i % 256)
	}
	proposedShards := [][]byte{
		append(slices.Clone(shardAddress), 0x00),
		append(slices.Clone(shardAddress), 0x01),
	}

	op := global.NewShardSplitOp(shardAddress, proposedShards, nil, nil, nil)
	op.FrameNumber = 12345
	op.PublicKeySignatureBLS48581 = MockAddressedSignature()

	data, err := op.ToRequestBytes()
	require.NoError(t, err)
	require.NotEmpty(t, data)
}
|
||||
|
||||
// setupSplitSignatureValidation sets up mock expectations for BLS signature
|
||||
// validation on a split operation.
|
||||
func setupSplitSignatureValidation(
|
||||
mockKeyManager *mocks.MockKeyManager,
|
||||
pubKey []byte,
|
||||
) {
|
||||
mockKeyManager.On("ValidateSignature",
|
||||
crypto.KeyTypeBLS48581G1,
|
||||
pubKey,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
).Return(true, nil)
|
||||
}
|
||||
@ -96,6 +96,7 @@ var pebbleMigrations = []func(*pebble.Batch, *pebble.DB, *config.Config) error{
|
||||
migration_2_1_0_1820,
|
||||
migration_2_1_0_1821,
|
||||
migration_2_1_0_1822,
|
||||
migration_2_1_0_1823,
|
||||
}
|
||||
|
||||
func NewPebbleDB(
|
||||
@ -1133,6 +1134,12 @@ func migration_2_1_0_1822(b *pebble.Batch, db *pebble.DB, cfg *config.Config) er
|
||||
return doMigration1818(db, cfg)
|
||||
}
|
||||
|
||||
// migration_2_1_0_1823 rebuilds the global prover shard tree to fix potential
// corruption from transaction bypass bugs in SaveRoot and Commit.
//
// The pebble batch b is intentionally unused: the rebuild delegates to
// doMigration1818, which operates on the database directly.
func migration_2_1_0_1823(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
	return doMigration1818(db, cfg)
}
|
||||
|
||||
// pebbleBatchDB wraps a *pebble.Batch to implement store.KVDB for use in migrations
|
||||
type pebbleBatchDB struct {
|
||||
b *pebble.Batch
|
||||
|
||||
@ -66,6 +66,8 @@ const (
|
||||
ProverSeniorityMergeType uint32 = 0x031A
|
||||
TimeoutStateType uint32 = 0x031C
|
||||
TimeoutCertificateType uint32 = 0x031D
|
||||
ShardSplitType uint32 = 0x031E
|
||||
ShardMergeType uint32 = 0x031F
|
||||
|
||||
// Hypergraph types (0x0400 - 0x04FF)
|
||||
HypergraphConfigurationType uint32 = 0x0401
|
||||
|
||||
@ -2078,6 +2078,386 @@ func (a *AltShardUpdate) FromCanonicalBytes(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShardSplit serialization methods

// ToCanonicalBytes serializes a ShardSplit deterministically as: the uint32
// type prefix, a length-prefixed shard_address, a count followed by
// length-prefixed proposed shards, the uint64 frame_number, and finally a
// length-prefixed BLS signature (length 0 when absent). All integers are
// big-endian. Field order must match FromCanonicalBytes exactly.
func (s *ShardSplit) ToCanonicalBytes() ([]byte, error) {
	buf := new(bytes.Buffer)

	// Write type prefix
	if err := binary.Write(buf, binary.BigEndian, ShardSplitType); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write shard_address (length-prefixed)
	if err := binary.Write(
		buf,
		binary.BigEndian,
		uint32(len(s.ShardAddress)),
	); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}
	if _, err := buf.Write(s.ShardAddress); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write number of proposed_shards
	if err := binary.Write(
		buf,
		binary.BigEndian,
		uint32(len(s.ProposedShards)),
	); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write each proposed shard (length-prefixed)
	for _, shard := range s.ProposedShards {
		if err := binary.Write(
			buf,
			binary.BigEndian,
			uint32(len(shard)),
		); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
		if _, err := buf.Write(shard); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
	}

	// Write frame_number
	if err := binary.Write(buf, binary.BigEndian, s.FrameNumber); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write public_key_signature_bls48581; a zero length marks absence.
	if s.PublicKeySignatureBls48581 != nil {
		sigBytes, err := s.PublicKeySignatureBls48581.ToCanonicalBytes()
		if err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
		if err := binary.Write(
			buf,
			binary.BigEndian,
			uint32(len(sigBytes)),
		); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
		if _, err := buf.Write(sigBytes); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
	} else {
		if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
	}

	return buf.Bytes(), nil
}
|
||||
|
||||
func (s *ShardSplit) FromCanonicalBytes(data []byte) error {
|
||||
buf := bytes.NewBuffer(data)
|
||||
|
||||
// Read and verify type prefix
|
||||
var typePrefix uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if typePrefix != ShardSplitType {
|
||||
return errors.Wrap(
|
||||
errors.New("invalid type prefix"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
|
||||
// Read shard_address
|
||||
var addrLen uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &addrLen); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if addrLen > 64 {
|
||||
return errors.Wrap(
|
||||
errors.New("invalid shard address length"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
s.ShardAddress = make([]byte, addrLen)
|
||||
if _, err := buf.Read(s.ShardAddress); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
|
||||
// Read number of proposed_shards
|
||||
var numShards uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &numShards); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if numShards > 8 {
|
||||
return errors.Wrap(
|
||||
errors.New("too many proposed shards"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
|
||||
// Read each proposed shard
|
||||
s.ProposedShards = make([][]byte, numShards)
|
||||
for i := uint32(0); i < numShards; i++ {
|
||||
var shardLen uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &shardLen); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if shardLen > 66 {
|
||||
return errors.Wrap(
|
||||
errors.New("invalid proposed shard length"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
s.ProposedShards[i] = make([]byte, shardLen)
|
||||
if _, err := buf.Read(s.ProposedShards[i]); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
}
|
||||
|
||||
// Read frame_number
|
||||
if err := binary.Read(buf, binary.BigEndian, &s.FrameNumber); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
|
||||
// Read public_key_signature_bls48581
|
||||
var sigLen uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if sigLen > 0 {
|
||||
sigBytes := make([]byte, sigLen)
|
||||
if _, err := buf.Read(sigBytes); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
s.PublicKeySignatureBls48581 = &BLS48581AddressedSignature{}
|
||||
if err := s.PublicKeySignatureBls48581.FromCanonicalBytes(
|
||||
sigBytes,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ShardSplit) Validate() error {
|
||||
if len(s.ShardAddress) < 32 || len(s.ShardAddress) > 63 {
|
||||
return errors.New("shard_address must be 32-63 bytes")
|
||||
}
|
||||
|
||||
if len(s.ProposedShards) < 2 || len(s.ProposedShards) > 8 {
|
||||
return errors.New("proposed_shards must have 2-8 entries")
|
||||
}
|
||||
|
||||
for _, shard := range s.ProposedShards {
|
||||
if len(shard) != len(s.ShardAddress)+1 &&
|
||||
len(shard) != len(s.ShardAddress)+2 {
|
||||
return errors.Errorf(
|
||||
"proposed shard length %d invalid for parent length %d",
|
||||
len(shard), len(s.ShardAddress),
|
||||
)
|
||||
}
|
||||
if !bytes.HasPrefix(shard, s.ShardAddress) {
|
||||
return errors.New("proposed shard must share parent prefix")
|
||||
}
|
||||
}
|
||||
|
||||
if s.PublicKeySignatureBls48581 == nil {
|
||||
return errors.New("BLS signature must be present")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShardMerge serialization methods

// ToCanonicalBytes serializes a ShardMerge deterministically as: the uint32
// type prefix, a count followed by length-prefixed shard addresses, a
// length-prefixed parent_address, the uint64 frame_number, and finally a
// length-prefixed BLS signature (length 0 when absent). All integers are
// big-endian. Field order must match FromCanonicalBytes exactly.
func (s *ShardMerge) ToCanonicalBytes() ([]byte, error) {
	buf := new(bytes.Buffer)

	// Write type prefix
	if err := binary.Write(buf, binary.BigEndian, ShardMergeType); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write number of shard_addresses
	if err := binary.Write(
		buf,
		binary.BigEndian,
		uint32(len(s.ShardAddresses)),
	); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write each shard address (length-prefixed)
	for _, addr := range s.ShardAddresses {
		if err := binary.Write(
			buf,
			binary.BigEndian,
			uint32(len(addr)),
		); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
		if _, err := buf.Write(addr); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
	}

	// Write parent_address (length-prefixed)
	if err := binary.Write(
		buf,
		binary.BigEndian,
		uint32(len(s.ParentAddress)),
	); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}
	if _, err := buf.Write(s.ParentAddress); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write frame_number
	if err := binary.Write(buf, binary.BigEndian, s.FrameNumber); err != nil {
		return nil, errors.Wrap(err, "to canonical bytes")
	}

	// Write public_key_signature_bls48581; a zero length marks absence.
	if s.PublicKeySignatureBls48581 != nil {
		sigBytes, err := s.PublicKeySignatureBls48581.ToCanonicalBytes()
		if err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
		if err := binary.Write(
			buf,
			binary.BigEndian,
			uint32(len(sigBytes)),
		); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
		if _, err := buf.Write(sigBytes); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
	} else {
		if err := binary.Write(buf, binary.BigEndian, uint32(0)); err != nil {
			return nil, errors.Wrap(err, "to canonical bytes")
		}
	}

	return buf.Bytes(), nil
}
|
||||
|
||||
func (s *ShardMerge) FromCanonicalBytes(data []byte) error {
|
||||
buf := bytes.NewBuffer(data)
|
||||
|
||||
// Read and verify type prefix
|
||||
var typePrefix uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &typePrefix); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if typePrefix != ShardMergeType {
|
||||
return errors.Wrap(
|
||||
errors.New("invalid type prefix"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
|
||||
// Read number of shard_addresses
|
||||
var numAddrs uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &numAddrs); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if numAddrs > 8 {
|
||||
return errors.Wrap(
|
||||
errors.New("too many shard addresses"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
|
||||
// Read each shard address
|
||||
s.ShardAddresses = make([][]byte, numAddrs)
|
||||
for i := uint32(0); i < numAddrs; i++ {
|
||||
var addrLen uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &addrLen); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if addrLen > 64 {
|
||||
return errors.Wrap(
|
||||
errors.New("invalid shard address length"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
s.ShardAddresses[i] = make([]byte, addrLen)
|
||||
if _, err := buf.Read(s.ShardAddresses[i]); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
}
|
||||
|
||||
// Read parent_address
|
||||
var parentLen uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &parentLen); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if parentLen > 64 {
|
||||
return errors.Wrap(
|
||||
errors.New("invalid parent address length"),
|
||||
"from canonical bytes",
|
||||
)
|
||||
}
|
||||
s.ParentAddress = make([]byte, parentLen)
|
||||
if _, err := buf.Read(s.ParentAddress); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
|
||||
// Read frame_number
|
||||
if err := binary.Read(buf, binary.BigEndian, &s.FrameNumber); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
|
||||
// Read public_key_signature_bls48581
|
||||
var sigLen uint32
|
||||
if err := binary.Read(buf, binary.BigEndian, &sigLen); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
if sigLen > 0 {
|
||||
sigBytes := make([]byte, sigLen)
|
||||
if _, err := buf.Read(sigBytes); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
s.PublicKeySignatureBls48581 = &BLS48581AddressedSignature{}
|
||||
if err := s.PublicKeySignatureBls48581.FromCanonicalBytes(
|
||||
sigBytes,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ShardMerge) Validate() error {
|
||||
if len(s.ShardAddresses) < 2 || len(s.ShardAddresses) > 8 {
|
||||
return errors.New("shard_addresses must have 2-8 entries")
|
||||
}
|
||||
|
||||
if len(s.ParentAddress) != 32 {
|
||||
return errors.New("parent_address must be 32 bytes")
|
||||
}
|
||||
|
||||
for _, addr := range s.ShardAddresses {
|
||||
if len(addr) <= 32 {
|
||||
return errors.New("cannot merge base shards (must be > 32 bytes)")
|
||||
}
|
||||
if !bytes.HasPrefix(addr, s.ParentAddress) {
|
||||
return errors.New(
|
||||
"all shard addresses must share the parent address prefix",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if s.PublicKeySignatureBls48581 == nil {
|
||||
return errors.New("BLS signature must be present")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MessageRequest) ToCanonicalBytes() ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
@ -2145,6 +2525,10 @@ func (m *MessageRequest) ToCanonicalBytes() ([]byte, error) {
|
||||
innerBytes, err = request.AltShardUpdate.ToCanonicalBytes()
|
||||
case *MessageRequest_SeniorityMerge:
|
||||
innerBytes, err = request.SeniorityMerge.ToCanonicalBytes()
|
||||
case *MessageRequest_ShardSplit:
|
||||
innerBytes, err = request.ShardSplit.ToCanonicalBytes()
|
||||
case *MessageRequest_ShardMerge:
|
||||
innerBytes, err = request.ShardMerge.ToCanonicalBytes()
|
||||
default:
|
||||
return nil, errors.New("unknown request type")
|
||||
}
|
||||
@ -2419,6 +2803,24 @@ func (m *MessageRequest) FromCanonicalBytes(data []byte) error {
|
||||
SeniorityMerge: seniorityMerge,
|
||||
}
|
||||
|
||||
case ShardSplitType:
|
||||
shardSplit := &ShardSplit{}
|
||||
if err := shardSplit.FromCanonicalBytes(dataBytes); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
m.Request = &MessageRequest_ShardSplit{
|
||||
ShardSplit: shardSplit,
|
||||
}
|
||||
|
||||
case ShardMergeType:
|
||||
shardMerge := &ShardMerge{}
|
||||
if err := shardMerge.FromCanonicalBytes(dataBytes); err != nil {
|
||||
return errors.Wrap(err, "from canonical bytes")
|
||||
}
|
||||
m.Request = &MessageRequest_ShardMerge{
|
||||
ShardMerge: shardMerge,
|
||||
}
|
||||
|
||||
default:
|
||||
return errors.Errorf("unknown message type: 0x%08X", innerType)
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -111,6 +111,28 @@ message AltShardUpdate {
|
||||
bytes signature = 7;
|
||||
}
|
||||
|
||||
// ShardSplit proposes dividing one shard into new sub-shards.
message ShardSplit {
  // The original shard address being split (32-63 bytes)
  bytes shard_address = 1;
  // The new sub-shard addresses proposed by the split; each extends
  // shard_address by one or two bytes (see ShardSplit validation)
  repeated bytes proposed_shards = 2;
  // The frame number at which the split was detected
  uint64 frame_number = 3;
  // The BLS48-581 addressed signature proving prover identity
  quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 4;
}

// ShardMerge proposes folding sibling sub-shards back into their parent.
message ShardMerge {
  // The sub-shard addresses being merged; each must be prefixed by
  // parent_address (see ShardMerge validation)
  repeated bytes shard_addresses = 1;
  // The parent shard address (first 32 bytes)
  bytes parent_address = 2;
  // The frame number at which the merge was detected
  uint64 frame_number = 3;
  // The BLS48-581 addressed signature proving prover identity
  quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 4;
}
|
||||
|
||||
message MessageRequest {
|
||||
oneof request {
|
||||
quilibrium.node.global.pb.ProverJoin join = 1;
|
||||
@ -140,6 +162,8 @@ message MessageRequest {
|
||||
quilibrium.node.global.pb.FrameHeader shard = 25;
|
||||
quilibrium.node.global.pb.AltShardUpdate alt_shard_update = 26;
|
||||
quilibrium.node.global.pb.ProverSeniorityMerge seniority_merge = 27;
|
||||
quilibrium.node.global.pb.ShardSplit shard_split = 28;
|
||||
quilibrium.node.global.pb.ShardMerge shard_merge = 29;
|
||||
}
|
||||
int64 timestamp = 99;
|
||||
}
|
||||
|
||||
@ -664,6 +664,284 @@ func TestProverKick_Serialization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestShardSplit_Serialization(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
split *ShardSplit
|
||||
}{
|
||||
{
|
||||
name: "complete shard split",
|
||||
split: &ShardSplit{
|
||||
ShardAddress: make([]byte, 33),
|
||||
ProposedShards: [][]byte{make([]byte, 34), make([]byte, 34)},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: make([]byte, 74),
|
||||
Address: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "split with max proposed shards",
|
||||
split: &ShardSplit{
|
||||
ShardAddress: append([]byte{0xFF}, make([]byte, 32)...),
|
||||
ProposedShards: [][]byte{
|
||||
make([]byte, 34), make([]byte, 34),
|
||||
make([]byte, 34), make([]byte, 34),
|
||||
make([]byte, 34), make([]byte, 34),
|
||||
make([]byte, 34), make([]byte, 34),
|
||||
},
|
||||
FrameNumber: 99999,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: append([]byte{0xAA}, make([]byte, 73)...),
|
||||
Address: append([]byte{0xCC}, make([]byte, 31)...),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data, err := tt.split.ToCanonicalBytes()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, data)
|
||||
|
||||
split2 := &ShardSplit{}
|
||||
err = split2.FromCanonicalBytes(data)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, tt.split.ShardAddress, split2.ShardAddress)
|
||||
assert.Equal(t, tt.split.ProposedShards, split2.ProposedShards)
|
||||
assert.Equal(t, tt.split.FrameNumber, split2.FrameNumber)
|
||||
require.NotNil(t, split2.PublicKeySignatureBls48581)
|
||||
assert.Equal(t, tt.split.PublicKeySignatureBls48581.Signature, split2.PublicKeySignatureBls48581.Signature)
|
||||
assert.Equal(t, tt.split.PublicKeySignatureBls48581.Address, split2.PublicKeySignatureBls48581.Address)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShardMerge_Serialization(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
merge *ShardMerge
|
||||
}{
|
||||
{
|
||||
name: "complete shard merge",
|
||||
merge: &ShardMerge{
|
||||
ShardAddresses: [][]byte{make([]byte, 33), make([]byte, 33)},
|
||||
ParentAddress: make([]byte, 32),
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: make([]byte, 74),
|
||||
Address: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge with max shard addresses",
|
||||
merge: &ShardMerge{
|
||||
ShardAddresses: [][]byte{
|
||||
append([]byte{0x01}, make([]byte, 32)...),
|
||||
append([]byte{0x02}, make([]byte, 32)...),
|
||||
append([]byte{0x03}, make([]byte, 32)...),
|
||||
append([]byte{0x04}, make([]byte, 32)...),
|
||||
},
|
||||
ParentAddress: append([]byte{0xFF}, make([]byte, 31)...),
|
||||
FrameNumber: 77777,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: append([]byte{0xDD}, make([]byte, 73)...),
|
||||
Address: append([]byte{0xFF}, make([]byte, 31)...),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data, err := tt.merge.ToCanonicalBytes()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, data)
|
||||
|
||||
merge2 := &ShardMerge{}
|
||||
err = merge2.FromCanonicalBytes(data)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, tt.merge.ShardAddresses, merge2.ShardAddresses)
|
||||
assert.Equal(t, tt.merge.ParentAddress, merge2.ParentAddress)
|
||||
assert.Equal(t, tt.merge.FrameNumber, merge2.FrameNumber)
|
||||
require.NotNil(t, merge2.PublicKeySignatureBls48581)
|
||||
assert.Equal(t, tt.merge.PublicKeySignatureBls48581.Signature, merge2.PublicKeySignatureBls48581.Signature)
|
||||
assert.Equal(t, tt.merge.PublicKeySignatureBls48581.Address, merge2.PublicKeySignatureBls48581.Address)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShardSplit_Validate(t *testing.T) {
|
||||
t.Run("valid split passes", func(t *testing.T) {
|
||||
parent := make([]byte, 33)
|
||||
split := &ShardSplit{
|
||||
ShardAddress: parent,
|
||||
ProposedShards: [][]byte{append(parent, 0x00), append(parent, 0x01)},
|
||||
FrameNumber: 100,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: make([]byte, 74),
|
||||
Address: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
err := split.Validate()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("shard address too short", func(t *testing.T) {
|
||||
split := &ShardSplit{
|
||||
ShardAddress: make([]byte, 31),
|
||||
ProposedShards: [][]byte{make([]byte, 33), make([]byte, 33)},
|
||||
}
|
||||
err := split.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("too few proposed shards", func(t *testing.T) {
|
||||
split := &ShardSplit{
|
||||
ShardAddress: make([]byte, 33),
|
||||
ProposedShards: [][]byte{make([]byte, 34)},
|
||||
}
|
||||
err := split.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("nil signature", func(t *testing.T) {
|
||||
parent := make([]byte, 33)
|
||||
split := &ShardSplit{
|
||||
ShardAddress: parent,
|
||||
ProposedShards: [][]byte{append(parent, 0x00), append(parent, 0x01)},
|
||||
FrameNumber: 100,
|
||||
PublicKeySignatureBls48581: nil,
|
||||
}
|
||||
err := split.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestShardMerge_Validate(t *testing.T) {
|
||||
t.Run("valid merge passes", func(t *testing.T) {
|
||||
parent := make([]byte, 32)
|
||||
merge := &ShardMerge{
|
||||
ShardAddresses: [][]byte{append(parent, 0x00), append(parent, 0x01)},
|
||||
ParentAddress: parent,
|
||||
FrameNumber: 100,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: make([]byte, 74),
|
||||
Address: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
err := merge.Validate()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("too few shard addresses", func(t *testing.T) {
|
||||
merge := &ShardMerge{
|
||||
ShardAddresses: [][]byte{make([]byte, 33)},
|
||||
ParentAddress: make([]byte, 32),
|
||||
}
|
||||
err := merge.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("parent address wrong length", func(t *testing.T) {
|
||||
merge := &ShardMerge{
|
||||
ShardAddresses: [][]byte{make([]byte, 33), make([]byte, 33)},
|
||||
ParentAddress: make([]byte, 31),
|
||||
}
|
||||
err := merge.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("base shard rejected", func(t *testing.T) {
|
||||
parent := make([]byte, 32)
|
||||
merge := &ShardMerge{
|
||||
ShardAddresses: [][]byte{make([]byte, 32), append(parent, 0x01)},
|
||||
ParentAddress: parent,
|
||||
}
|
||||
err := merge.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("nil signature", func(t *testing.T) {
|
||||
parent := make([]byte, 32)
|
||||
merge := &ShardMerge{
|
||||
ShardAddresses: [][]byte{append(parent, 0x00), append(parent, 0x01)},
|
||||
ParentAddress: parent,
|
||||
FrameNumber: 100,
|
||||
PublicKeySignatureBls48581: nil,
|
||||
}
|
||||
err := merge.Validate()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMessageRequest_ShardSplit_Serialization(t *testing.T) {
|
||||
parent := make([]byte, 33)
|
||||
req := &MessageRequest{
|
||||
Request: &MessageRequest_ShardSplit{
|
||||
ShardSplit: &ShardSplit{
|
||||
ShardAddress: parent,
|
||||
ProposedShards: [][]byte{append(parent, 0x00), append(parent, 0x01)},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: make([]byte, 74),
|
||||
Address: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
data, err := req.ToCanonicalBytes()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, data)
|
||||
|
||||
req2 := &MessageRequest{}
|
||||
err = req2.FromCanonicalBytes(data)
|
||||
require.NoError(t, err)
|
||||
|
||||
splitReq, ok := req2.Request.(*MessageRequest_ShardSplit)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, parent, splitReq.ShardSplit.ShardAddress)
|
||||
assert.Equal(t, uint64(12345), splitReq.ShardSplit.FrameNumber)
|
||||
assert.Len(t, splitReq.ShardSplit.ProposedShards, 2)
|
||||
}
|
||||
|
||||
func TestMessageRequest_ShardMerge_Serialization(t *testing.T) {
|
||||
parent := make([]byte, 32)
|
||||
req := &MessageRequest{
|
||||
Request: &MessageRequest_ShardMerge{
|
||||
ShardMerge: &ShardMerge{
|
||||
ShardAddresses: [][]byte{append(parent, 0x00), append(parent, 0x01)},
|
||||
ParentAddress: parent,
|
||||
FrameNumber: 67890,
|
||||
PublicKeySignatureBls48581: &BLS48581AddressedSignature{
|
||||
Signature: make([]byte, 74),
|
||||
Address: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
data, err := req.ToCanonicalBytes()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, data)
|
||||
|
||||
req2 := &MessageRequest{}
|
||||
err = req2.FromCanonicalBytes(data)
|
||||
require.NoError(t, err)
|
||||
|
||||
mergeReq, ok := req2.Request.(*MessageRequest_ShardMerge)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, parent, mergeReq.ShardMerge.ParentAddress)
|
||||
assert.Equal(t, uint64(67890), mergeReq.ShardMerge.FrameNumber)
|
||||
assert.Len(t, mergeReq.ShardMerge.ShardAddresses, 2)
|
||||
}
|
||||
|
||||
func TestProverLivenessCheck_Serialization(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@ -116,6 +116,7 @@ func (s *ShardMergeEventData) ControlEventData() {}
|
||||
// BulkShardMergeEventData contains all merge-eligible shard groups in a single event
type BulkShardMergeEventData struct {
	// MergeGroups holds one ShardMergeEventData per group of shards
	// eligible to be merged.
	MergeGroups []ShardMergeEventData
	// FrameProver is the prover associated with the frame that produced
	// this event — presumably a prover address/identity; NOTE(review):
	// confirm against the emitter.
	FrameProver []byte
}

// ControlEventData marks BulkShardMergeEventData as a control event payload.
func (b *BulkShardMergeEventData) ControlEventData() {}
|
||||
@ -126,6 +127,7 @@ type ShardSplitEventData struct {
|
||||
ProverCount int
|
||||
AttestedStorage uint64
|
||||
ProposedShards [][]byte
|
||||
FrameProver []byte
|
||||
}
|
||||
|
||||
func (s *ShardSplitEventData) ControlEventData() {}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user