Mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git (synced 2026-02-21 10:27:26 +08:00)
Parent: 7b923b91c4
Commit: 3f516b04fd
4
RELEASE-NOTES
Normal file
@@ -0,0 +1,4 @@
# 2.1.0.13
- Extends ProverConfirm and ProverReject to have multiple filters per message
- Adds snapshot integration to allow hypersync to occur concurrently with writes
- Resolved infinitesimal rings divide-by-zero error
@@ -43,7 +43,7 @@ func FormatVersion(version []byte) string {
 }

 func GetPatchNumber() byte {
-	return 0x0c
+	return 0x0d
 }

 func GetRCNumber() byte {
@@ -47,6 +47,9 @@ type HypergraphCRDT struct {
 	// handles locking scenarios for transactions
 	syncController *hypergraph.SyncController
 	mu sync.RWMutex
+	setsMu sync.RWMutex
+	prefixMu sync.RWMutex
+	snapshotMgr *snapshotManager

 	// provides context-driven info for client identification
 	authenticationProvider channel.AuthenticationProvider
@@ -65,7 +68,7 @@ func NewHypergraph(
 	coveredPrefix []int,
 	authenticationProvider channel.AuthenticationProvider,
 ) *HypergraphCRDT {
-	return &HypergraphCRDT{
+	hg := &HypergraphCRDT{
 		logger: logger,
 		size: big.NewInt(0),
 		vertexAdds: make(map[tries.ShardKey]hypergraph.IdSet),
@ -77,6 +80,108 @@ func NewHypergraph(
|
||||
coveredPrefix: coveredPrefix,
|
||||
authenticationProvider: authenticationProvider,
|
||||
syncController: hypergraph.NewSyncController(),
|
||||
snapshotMgr: newSnapshotManager(logger),
|
||||
}
|
||||
|
||||
hg.publishSnapshot(nil)
|
||||
return hg
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) publishSnapshot(root []byte) {
|
||||
if hg.store == nil || hg.snapshotMgr == nil {
|
||||
return
|
||||
}
|
||||
hg.logger.Debug("publishing snapshot")
|
||||
|
||||
snapshotStore, release, err := hg.store.NewSnapshot()
|
||||
if err != nil {
|
||||
hg.logger.Warn("unable to create hypergraph snapshot", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
hg.snapshotMgr.publish(snapshotStore, release, root)
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) cloneSetWithStore(
|
||||
set hypergraph.IdSet,
|
||||
store tries.TreeBackingStore,
|
||||
) hypergraph.IdSet {
|
||||
if store == nil {
|
||||
return set
|
||||
}
|
||||
|
||||
if typed, ok := set.(*idSet); ok {
|
||||
return typed.cloneWithStore(store)
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) snapshotSet(
|
||||
shardKey tries.ShardKey,
|
||||
targetStore tries.TreeBackingStore,
|
||||
setMap map[tries.ShardKey]hypergraph.IdSet,
|
||||
atomType hypergraph.AtomType,
|
||||
phaseType hypergraph.PhaseType,
|
||||
) hypergraph.IdSet {
|
||||
hg.setsMu.RLock()
|
||||
set := setMap[shardKey]
|
||||
hg.setsMu.RUnlock()
|
||||
|
||||
if set == nil {
|
||||
set = NewIdSet(
|
||||
atomType,
|
||||
phaseType,
|
||||
shardKey,
|
||||
hg.store,
|
||||
hg.prover,
|
||||
nil,
|
||||
hg.getCoveredPrefix(),
|
||||
)
|
||||
}
|
||||
|
||||
return hg.cloneSetWithStore(set, targetStore)
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) snapshotPhaseSet(
|
||||
shardKey tries.ShardKey,
|
||||
phaseSet protobufs.HypergraphPhaseSet,
|
||||
targetStore tries.TreeBackingStore,
|
||||
) hypergraph.IdSet {
|
||||
switch phaseSet {
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS:
|
||||
return hg.snapshotSet(
|
||||
shardKey,
|
||||
targetStore,
|
||||
hg.vertexAdds,
|
||||
hypergraph.VertexAtomType,
|
||||
hypergraph.AddsPhaseType,
|
||||
)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_REMOVES:
|
||||
return hg.snapshotSet(
|
||||
shardKey,
|
||||
targetStore,
|
||||
hg.vertexRemoves,
|
||||
hypergraph.VertexAtomType,
|
||||
hypergraph.RemovesPhaseType,
|
||||
)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_ADDS:
|
||||
return hg.snapshotSet(
|
||||
shardKey,
|
||||
targetStore,
|
||||
hg.hyperedgeAdds,
|
||||
hypergraph.HyperedgeAtomType,
|
||||
hypergraph.AddsPhaseType,
|
||||
)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_REMOVES:
|
||||
return hg.snapshotSet(
|
||||
shardKey,
|
||||
targetStore,
|
||||
hg.hyperedgeRemoves,
|
||||
hypergraph.HyperedgeAtomType,
|
||||
hypergraph.RemovesPhaseType,
|
||||
)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -139,20 +244,28 @@ func (hg *HypergraphCRDT) ImportTree(
|
||||
case hypergraph.VertexAtomType:
|
||||
switch phaseType {
|
||||
case hypergraph.AddsPhaseType:
|
||||
hg.setsMu.Lock()
|
||||
hg.size.Add(hg.size, treeSize)
|
||||
hg.vertexAdds[shardKey] = set
|
||||
hg.setsMu.Unlock()
|
||||
case hypergraph.RemovesPhaseType:
|
||||
hg.setsMu.Lock()
|
||||
hg.size.Sub(hg.size, treeSize)
|
||||
hg.vertexRemoves[shardKey] = set
|
||||
hg.setsMu.Unlock()
|
||||
}
|
||||
case hypergraph.HyperedgeAtomType:
|
||||
switch phaseType {
|
||||
case hypergraph.AddsPhaseType:
|
||||
hg.setsMu.Lock()
|
||||
hg.size.Add(hg.size, treeSize)
|
||||
hg.hyperedgeAdds[shardKey] = set
|
||||
hg.setsMu.Unlock()
|
||||
case hypergraph.RemovesPhaseType:
|
||||
hg.setsMu.Lock()
|
||||
hg.size.Sub(hg.size, treeSize)
|
||||
hg.hyperedgeRemoves[shardKey] = set
|
||||
hg.setsMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
@ -542,19 +655,20 @@ func (hg *HypergraphCRDT) GetMetadataAtKey(pathKey []byte) (
|
||||
L1: [3]byte(l1),
|
||||
L2: [32]byte(pathKey[:32]),
|
||||
}
|
||||
coveredPrefix := hg.getCoveredPrefix()
|
||||
vertexAdds, vertexRemoves := hg.getOrCreateIdSet(
|
||||
shardKey,
|
||||
hg.vertexAdds,
|
||||
hg.vertexRemoves,
|
||||
hypergraph.VertexAtomType,
|
||||
hg.coveredPrefix,
|
||||
coveredPrefix,
|
||||
)
|
||||
hyperedgeAdds, hyperedgeRemoves := hg.getOrCreateIdSet(
|
||||
shardKey,
|
||||
hg.hyperedgeAdds,
|
||||
hg.hyperedgeRemoves,
|
||||
hypergraph.HyperedgeAtomType,
|
||||
hg.coveredPrefix,
|
||||
coveredPrefix,
|
||||
)
|
||||
|
||||
metadata := []hypergraph.ShardMetadata{}
|
||||
|
||||
@ -136,38 +136,58 @@ func (set *idSet) Has(key [64]byte) bool {
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (set *idSet) cloneWithStore(
|
||||
store tries.TreeBackingStore,
|
||||
) *idSet {
|
||||
if set == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &idSet{
|
||||
dirty: set.dirty,
|
||||
atomType: set.atomType,
|
||||
tree: set.tree.CloneWithStore(store),
|
||||
validator: set.validator,
|
||||
}
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) GetCoveredPrefix() ([]int, error) {
|
||||
hg.mu.RLock()
|
||||
defer hg.mu.RUnlock()
|
||||
return hg.getCoveredPrefix(), nil
|
||||
hg.prefixMu.RLock()
|
||||
defer hg.prefixMu.RUnlock()
|
||||
return slices.Clone(hg.coveredPrefix), nil
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) getCoveredPrefix() []int {
|
||||
hg.prefixMu.RLock()
|
||||
defer hg.prefixMu.RUnlock()
|
||||
return slices.Clone(hg.coveredPrefix)
|
||||
}
|
||||
|
||||
func (hg *HypergraphCRDT) SetCoveredPrefix(prefix []int) error {
|
||||
hg.mu.Lock()
|
||||
defer hg.mu.Unlock()
|
||||
hg.coveredPrefix = slices.Clone(prefix)
|
||||
prefixCopy := slices.Clone(prefix)
|
||||
hg.prefixMu.Lock()
|
||||
hg.coveredPrefix = prefixCopy
|
||||
hg.prefixMu.Unlock()
|
||||
|
||||
hg.setsMu.Lock()
|
||||
for _, s := range hg.hyperedgeAdds {
|
||||
s.GetTree().CoveredPrefix = prefix
|
||||
s.GetTree().CoveredPrefix = prefixCopy
|
||||
}
|
||||
|
||||
for _, s := range hg.hyperedgeRemoves {
|
||||
s.GetTree().CoveredPrefix = prefix
|
||||
s.GetTree().CoveredPrefix = prefixCopy
|
||||
}
|
||||
|
||||
for _, s := range hg.vertexAdds {
|
||||
s.GetTree().CoveredPrefix = prefix
|
||||
s.GetTree().CoveredPrefix = prefixCopy
|
||||
}
|
||||
|
||||
for _, s := range hg.vertexRemoves {
|
||||
s.GetTree().CoveredPrefix = prefix
|
||||
s.GetTree().CoveredPrefix = prefixCopy
|
||||
}
|
||||
hg.setsMu.Unlock()
|
||||
|
||||
return hg.store.SetCoveredPrefix(prefix)
|
||||
return hg.store.SetCoveredPrefix(prefixCopy)
|
||||
}
|
||||
|
||||
// GetVertexAddsSet returns a specific vertex addition set by shard key.
|
||||
@ -283,6 +303,8 @@ func (hg *HypergraphCRDT) getOrCreateIdSet(
|
||||
atomType hypergraph.AtomType,
|
||||
coveredPrefix []int,
|
||||
) (hypergraph.IdSet, hypergraph.IdSet) {
|
||||
hg.setsMu.Lock()
|
||||
defer hg.setsMu.Unlock()
|
||||
if _, ok := addMap[shardAddr]; !ok {
|
||||
addMap[shardAddr] = NewIdSet(
|
||||
atomType,
|
||||
|
||||
@ -1,7 +1,10 @@
|
||||
package hypergraph
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@ -215,8 +218,11 @@ func (hg *HypergraphCRDT) Commit(
|
||||
return nil, errors.Wrap(err, "commit shard")
|
||||
}
|
||||
|
||||
snapshotRoot := snapshotRootDigest(commits)
|
||||
|
||||
// Update metrics
|
||||
CommitTotal.WithLabelValues("success").Inc()
|
||||
hg.publishSnapshot(snapshotRoot)
|
||||
|
||||
// Update shard count gauges
|
||||
VertexAddsShards.Set(float64(len(hg.vertexAdds)))
|
||||
@ -233,6 +239,46 @@ func (hg *HypergraphCRDT) Commit(
|
||||
return commits, nil
|
||||
}
|
||||
|
||||
func snapshotRootDigest(commits map[tries.ShardKey][][]byte) []byte {
|
||||
hasher := sha256.New()
|
||||
var zero [64]byte
|
||||
|
||||
if len(commits) == 0 {
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
keys := make([]tries.ShardKey, 0, len(commits))
|
||||
for k := range commits {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Slice(keys, func(i, j int) bool {
|
||||
if cmp := bytes.Compare(keys[i].L1[:], keys[j].L1[:]); cmp != 0 {
|
||||
return cmp < 0
|
||||
}
|
||||
return bytes.Compare(keys[i].L2[:], keys[j].L2[:]) < 0
|
||||
})
|
||||
|
||||
for _, key := range keys {
|
||||
hasher.Write(key.L1[:])
|
||||
hasher.Write(key.L2[:])
|
||||
|
||||
roots := commits[key]
|
||||
for phase := 0; phase < 4; phase++ {
|
||||
var root []byte
|
||||
if phase < len(roots) {
|
||||
root = roots[phase]
|
||||
}
|
||||
if len(root) != len(zero) {
|
||||
hasher.Write(zero[:])
|
||||
} else {
|
||||
hasher.Write(root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
// Commit calculates the sub-scoped vector commitments of each phase set and
|
||||
// returns the roots of each.
|
||||
func (hg *HypergraphCRDT) CommitShard(
|
||||
|
||||
116
hypergraph/snapshot_manager.go
Normal file
@@ -0,0 +1,116 @@
package hypergraph

import (
	"encoding/hex"
	"fmt"
	"sync/atomic"

	"go.uber.org/zap"
	"source.quilibrium.com/quilibrium/monorepo/types/tries"
)

type snapshotHandle struct {
	store   tries.TreeBackingStore
	release func()
	refs    atomic.Int32
	root    []byte
}

func newSnapshotHandle(
	store tries.TreeBackingStore,
	release func(),
	root []byte,
) *snapshotHandle {
	h := &snapshotHandle{
		store:   store,
		release: release,
	}
	if len(root) != 0 {
		h.root = append([]byte{}, root...)
	}
	h.refs.Store(1)
	return h
}

func (h *snapshotHandle) acquire() tries.TreeBackingStore {
	h.refs.Add(1)
	return h.store
}

func (h *snapshotHandle) releaseRef(logger *zap.Logger) {
	if h == nil {
		return
	}

	if h.refs.Add(-1) == 0 && h.release != nil {
		if err := safeRelease(h.release); err != nil {
			logger.Warn("failed to release hypergraph snapshot", zap.Error(err))
		}
	}
}

func (h *snapshotHandle) Store() tries.TreeBackingStore {
	if h == nil {
		return nil
	}
	return h.store
}

func (h *snapshotHandle) Root() []byte {
	if h == nil || len(h.root) == 0 {
		return nil
	}
	return append([]byte{}, h.root...)
}

type snapshotManager struct {
	logger  *zap.Logger
	current atomic.Pointer[snapshotHandle]
}

func newSnapshotManager(logger *zap.Logger) *snapshotManager {
	return &snapshotManager{logger: logger}
}

func (m *snapshotManager) publish(
	store tries.TreeBackingStore,
	release func(),
	root []byte,
) {
	handle := newSnapshotHandle(store, release, root)
	prev := m.current.Swap(handle)
	if prev != nil {
		prev.releaseRef(m.logger)
	}
	rootHex := ""
	if len(root) != 0 {
		rootHex = hex.EncodeToString(root)
	}
	m.logger.Debug("swapped snapshot", zap.String("root", rootHex))
}

func (m *snapshotManager) acquire() *snapshotHandle {
	handle := m.current.Load()
	if handle == nil {
		return nil
	}
	handle.acquire()
	return handle
}

func (m *snapshotManager) release(handle *snapshotHandle) {
	if handle == nil {
		return
	}
	handle.releaseRef(m.logger)
}

func safeRelease(fn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic releasing snapshot: %v", r)
		}
	}()
	fn()
	return nil
}
@ -22,30 +22,43 @@ import (
|
||||
func (hg *HypergraphCRDT) HyperStream(
|
||||
stream protobufs.HypergraphComparisonService_HyperStreamServer,
|
||||
) error {
|
||||
if !hg.syncController.TryEstablishSyncSession() {
|
||||
return errors.New("unavailable")
|
||||
}
|
||||
|
||||
hg.mu.Lock()
|
||||
defer hg.mu.Unlock()
|
||||
defer hg.syncController.EndSyncSession()
|
||||
|
||||
peerId, err := hg.authenticationProvider.Identify(stream.Context())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "hyper stream")
|
||||
}
|
||||
|
||||
status, ok := hg.syncController.SyncStatus[peerId.String()]
|
||||
if ok && time.Since(status.LastSynced) < 10*time.Second {
|
||||
return errors.New("peer too recently synced")
|
||||
peerKey := peerId.String()
|
||||
if !hg.syncController.TryEstablishSyncSession(peerKey) {
|
||||
return errors.New("peer already syncing")
|
||||
}
|
||||
defer func() {
|
||||
hg.syncController.EndSyncSession(peerKey)
|
||||
}()
|
||||
|
||||
handle := hg.snapshotMgr.acquire()
|
||||
if handle == nil {
|
||||
return errors.New("hypergraph snapshot unavailable")
|
||||
}
|
||||
defer hg.snapshotMgr.release(handle)
|
||||
|
||||
root := handle.Root()
|
||||
if len(root) != 0 {
|
||||
hg.logger.Debug(
|
||||
"acquired snapshot",
|
||||
zap.String("root", hex.EncodeToString(root)),
|
||||
)
|
||||
} else {
|
||||
hg.logger.Debug("acquired snapshot", zap.String("root", ""))
|
||||
}
|
||||
|
||||
err = hg.syncTreeServer(stream)
|
||||
snapshotStore := handle.Store()
|
||||
|
||||
hg.syncController.SyncStatus[peerId.String()] = &hypergraph.SyncInfo{
|
||||
err = hg.syncTreeServer(stream, snapshotStore, root)
|
||||
|
||||
hg.syncController.SetStatus(peerKey, &hypergraph.SyncInfo{
|
||||
Unreachable: false,
|
||||
LastSynced: time.Now(),
|
||||
}
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
@ -59,13 +72,16 @@ func (hg *HypergraphCRDT) Sync(
|
||||
shardKey tries.ShardKey,
|
||||
phaseSet protobufs.HypergraphPhaseSet,
|
||||
) error {
|
||||
if !hg.syncController.TryEstablishSyncSession() {
|
||||
return errors.New("unavailable")
|
||||
const localSyncKey = "local-sync"
|
||||
if !hg.syncController.TryEstablishSyncSession(localSyncKey) {
|
||||
return errors.New("local sync already in progress")
|
||||
}
|
||||
defer func() {
|
||||
hg.syncController.EndSyncSession(localSyncKey)
|
||||
}()
|
||||
|
||||
hg.mu.Lock()
|
||||
defer hg.mu.Unlock()
|
||||
defer hg.syncController.EndSyncSession()
|
||||
|
||||
hg.logger.Info(
|
||||
"sending initialization message",
|
||||
@ -76,7 +92,6 @@ func (hg *HypergraphCRDT) Sync(
|
||||
zap.Int("phase_set", int(phaseSet)),
|
||||
)
|
||||
|
||||
// Get the appropriate id set
|
||||
var set hypergraph.IdSet
|
||||
switch phaseSet {
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS:
|
||||
@ -87,9 +102,11 @@ func (hg *HypergraphCRDT) Sync(
|
||||
set = hg.getHyperedgeAddsSet(shardKey)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_REMOVES:
|
||||
set = hg.getHyperedgeRemovesSet(shardKey)
|
||||
default:
|
||||
return errors.New("unsupported phase set")
|
||||
}
|
||||
|
||||
path := hg.coveredPrefix
|
||||
path := hg.getCoveredPrefix()
|
||||
|
||||
// Send initial query for path
|
||||
if err := stream.Send(&protobufs.HypergraphComparison{
|
||||
@ -106,8 +123,10 @@ func (hg *HypergraphCRDT) Sync(
|
||||
return err
|
||||
}
|
||||
|
||||
// hg.logger.Debug("server waiting for initial query")
|
||||
msg, err := stream.Recv()
|
||||
if err != nil {
|
||||
hg.logger.Info("initial recv failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
response := msg.GetResponse()
|
||||
@ -158,7 +177,7 @@ func (hg *HypergraphCRDT) Sync(
|
||||
for {
|
||||
msg, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
hg.logger.Debug("stream closed by sender")
|
||||
// hg.logger.Debug("stream closed by sender")
|
||||
cancel()
|
||||
close(incomingQueriesIn)
|
||||
close(incomingResponsesIn)
|
||||
@ -166,7 +185,7 @@ func (hg *HypergraphCRDT) Sync(
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
hg.logger.Debug("error from stream", zap.Error(err))
|
||||
// hg.logger.Debug("error from stream", zap.Error(err))
|
||||
cancel()
|
||||
close(incomingQueriesIn)
|
||||
close(incomingResponsesIn)
|
||||
@ -216,7 +235,7 @@ func (hg *HypergraphCRDT) Sync(
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
hg.logger.Error("error while syncing", zap.Error(err))
|
||||
hg.logger.Debug("error while syncing", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
@ -462,10 +481,10 @@ func (s *streamManager) sendLeafData(
|
||||
},
|
||||
}
|
||||
|
||||
s.logger.Info(
|
||||
"sending leaf data",
|
||||
zap.String("key", hex.EncodeToString(leaf.Key)),
|
||||
)
|
||||
// s.logger.Info(
|
||||
// "sending leaf data",
|
||||
// zap.String("key", hex.EncodeToString(leaf.Key)),
|
||||
// )
|
||||
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
@ -507,7 +526,7 @@ func (s *streamManager) sendLeafData(
|
||||
}
|
||||
|
||||
if node == nil {
|
||||
s.logger.Info("no node, sending 0 leaves")
|
||||
// s.logger.Info("no node, sending 0 leaves")
|
||||
if err := s.stream.Send(&protobufs.HypergraphComparison{
|
||||
Payload: &protobufs.HypergraphComparison_Metadata{
|
||||
Metadata: &protobufs.HypersyncMetadata{Leaves: 0},
|
||||
@ -533,7 +552,7 @@ func (s *streamManager) sendLeafData(
|
||||
}
|
||||
count++
|
||||
}
|
||||
s.logger.Info("sending set of leaves", zap.Uint64("leaf_count", count))
|
||||
// s.logger.Info("sending set of leaves", zap.Uint64("leaf_count", count))
|
||||
if err := s.stream.Send(&protobufs.HypergraphComparison{
|
||||
Payload: &protobufs.HypergraphComparison_Metadata{
|
||||
Metadata: &protobufs.HypersyncMetadata{Leaves: count},
|
||||
@ -552,7 +571,7 @@ func (s *streamManager) sendLeafData(
|
||||
}
|
||||
} else {
|
||||
count = 1
|
||||
s.logger.Info("sending one leaf", zap.Uint64("leaf_count", count))
|
||||
// s.logger.Info("sending one leaf", zap.Uint64("leaf_count", count))
|
||||
if err := s.stream.Send(&protobufs.HypergraphComparison{
|
||||
Payload: &protobufs.HypergraphComparison_Metadata{
|
||||
Metadata: &protobufs.HypersyncMetadata{Leaves: count},
|
||||
@ -708,22 +727,20 @@ func getBranchInfoFromTree(
|
||||
for _, p := range path {
|
||||
intpath = append(intpath, int(p))
|
||||
}
|
||||
commitment := node.Commit(
|
||||
tree.InclusionProver,
|
||||
nil,
|
||||
tree.SetType,
|
||||
tree.PhaseType,
|
||||
tree.ShardKey,
|
||||
intpath,
|
||||
false,
|
||||
)
|
||||
|
||||
node = ensureCommittedNode(logger, tree, intpath, node)
|
||||
|
||||
branchInfo := &protobufs.HypergraphComparisonResponse{
|
||||
Path: path,
|
||||
Commitment: commitment,
|
||||
IsRoot: len(path) == 0,
|
||||
Path: path,
|
||||
IsRoot: len(path) == 0,
|
||||
}
|
||||
|
||||
if branch, ok := node.(*tries.LazyVectorCommitmentBranchNode); ok {
|
||||
branchInfo.Commitment = branch.Commitment
|
||||
if len(branch.Commitment) == 0 {
|
||||
panic("branch cannot have no commitment")
|
||||
}
|
||||
|
||||
for _, p := range branch.Prefix {
|
||||
branchInfo.Path = append(branchInfo.Path, int32(p))
|
||||
}
|
||||
@ -741,16 +758,21 @@ func getBranchInfoFromTree(
|
||||
logger.Panic("failed to get node by path", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
childPath := slices.Concat(branch.FullPrefix, []int{i})
|
||||
child = ensureCommittedNode(logger, tree, childPath, child)
|
||||
|
||||
if child != nil {
|
||||
childCommit := child.Commit(
|
||||
tree.InclusionProver,
|
||||
nil,
|
||||
tree.SetType,
|
||||
tree.PhaseType,
|
||||
tree.ShardKey,
|
||||
slices.Concat(branch.FullPrefix, []int{i}),
|
||||
false,
|
||||
)
|
||||
var childCommit []byte
|
||||
if childB, ok := child.(*tries.LazyVectorCommitmentBranchNode); ok {
|
||||
childCommit = childB.Commitment
|
||||
} else if childL, ok := child.(*tries.LazyVectorCommitmentLeafNode); ok {
|
||||
childCommit = childL.Commitment
|
||||
}
|
||||
|
||||
if len(childCommit) == 0 {
|
||||
panic("cannot have non-committed child")
|
||||
}
|
||||
branchInfo.Children = append(
|
||||
branchInfo.Children,
|
||||
&protobufs.BranchChild{
|
||||
@ -760,10 +782,58 @@ func getBranchInfoFromTree(
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if leaf, ok := node.(*tries.LazyVectorCommitmentLeafNode); ok {
|
||||
branchInfo.Commitment = leaf.Commitment
|
||||
if len(branchInfo.Commitment) == 0 {
|
||||
panic("leaf cannot have no commitment")
|
||||
}
|
||||
}
|
||||
return branchInfo, nil
|
||||
}
|
||||
|
||||
func ensureCommittedNode(
|
||||
logger *zap.Logger,
|
||||
tree *tries.LazyVectorCommitmentTree,
|
||||
path []int,
|
||||
node tries.LazyVectorCommitmentNode,
|
||||
) tries.LazyVectorCommitmentNode {
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
hasCommit := func(commitment []byte) bool {
|
||||
return len(commitment) != 0
|
||||
}
|
||||
|
||||
switch n := node.(type) {
|
||||
case *tries.LazyVectorCommitmentBranchNode:
|
||||
if hasCommit(n.Commitment) {
|
||||
return node
|
||||
}
|
||||
case *tries.LazyVectorCommitmentLeafNode:
|
||||
if hasCommit(n.Commitment) {
|
||||
return node
|
||||
}
|
||||
default:
|
||||
return node
|
||||
}
|
||||
|
||||
reloaded, err := tree.Store.GetNodeByPath(
|
||||
tree.SetType,
|
||||
tree.PhaseType,
|
||||
tree.ShardKey,
|
||||
path,
|
||||
)
|
||||
if err != nil && !strings.Contains(err.Error(), "item not found") {
|
||||
logger.Panic("failed to reload node by path", zap.Error(err))
|
||||
}
|
||||
if reloaded != nil {
|
||||
return reloaded
|
||||
}
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
// isLeaf infers whether a HypergraphComparisonResponse message represents a
|
||||
// leaf node.
|
||||
func isLeaf(info *protobufs.HypergraphComparisonResponse) bool {
|
||||
@ -847,7 +917,7 @@ func (s *streamManager) handleLeafData(
|
||||
)
|
||||
}
|
||||
|
||||
s.logger.Info("expecting leaves", zap.Uint64("count", expectedLeaves))
|
||||
// s.logger.Info("expecting leaves", zap.Uint64("count", expectedLeaves))
|
||||
|
||||
var txn tries.TreeBackingStoreTransaction
|
||||
var err error
|
||||
@ -894,10 +964,10 @@ func (s *streamManager) handleLeafData(
|
||||
remoteUpdate = msg.GetLeafData()
|
||||
}
|
||||
|
||||
s.logger.Info(
|
||||
"received leaf data",
|
||||
zap.String("key", hex.EncodeToString(remoteUpdate.Key)),
|
||||
)
|
||||
// s.logger.Info(
|
||||
// "received leaf data",
|
||||
// zap.String("key", hex.EncodeToString(remoteUpdate.Key)),
|
||||
// )
|
||||
|
||||
theirs := AtomFromBytes(remoteUpdate.Value)
|
||||
if len(remoteUpdate.UnderlyingData) != 0 {
|
||||
@ -1105,11 +1175,11 @@ func (s *streamManager) walk(
|
||||
pathString := zap.String("path", hex.EncodeToString(packPath(path)))
|
||||
|
||||
if bytes.Equal(lnode.Commitment, rnode.Commitment) {
|
||||
s.logger.Info(
|
||||
"commitments match",
|
||||
pathString,
|
||||
zap.String("commitment", hex.EncodeToString(lnode.Commitment)),
|
||||
)
|
||||
// s.logger.Debug(
|
||||
// "commitments match",
|
||||
// pathString,
|
||||
// zap.String("commitment", hex.EncodeToString(lnode.Commitment)),
|
||||
// )
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1118,7 +1188,7 @@ func (s *streamManager) walk(
|
||||
}
|
||||
|
||||
if isLeaf(rnode) || isLeaf(lnode) {
|
||||
s.logger.Info("leaf/branch mismatch at path", pathString)
|
||||
// s.logger.Debug("leaf/branch mismatch at path", pathString)
|
||||
if isServer {
|
||||
err := s.sendLeafData(
|
||||
path,
|
||||
@ -1134,22 +1204,22 @@ func (s *streamManager) walk(
|
||||
lpref := lnode.Path
|
||||
rpref := rnode.Path
|
||||
if len(lpref) != len(rpref) {
|
||||
s.logger.Info(
|
||||
"prefix length mismatch",
|
||||
zap.Int("local_prefix", len(lpref)),
|
||||
zap.Int("remote_prefix", len(rpref)),
|
||||
pathString,
|
||||
)
|
||||
// s.logger.Debug(
|
||||
// "prefix length mismatch",
|
||||
// zap.Int("local_prefix", len(lpref)),
|
||||
// zap.Int("remote_prefix", len(rpref)),
|
||||
// pathString,
|
||||
// )
|
||||
if len(lpref) > len(rpref) {
|
||||
s.logger.Info("local prefix longer, traversing remote to path", pathString)
|
||||
// s.logger.Debug("local prefix longer, traversing remote to path", pathString)
|
||||
traverse := lpref[len(rpref)-1:]
|
||||
rtrav := rnode
|
||||
traversePath := append([]int32{}, rpref...)
|
||||
for _, nibble := range traverse {
|
||||
s.logger.Info("attempting remote traversal step")
|
||||
// s.logger.Debug("attempting remote traversal step")
|
||||
for _, child := range rtrav.Children {
|
||||
if child.Index == nibble {
|
||||
s.logger.Info("sending query")
|
||||
// s.logger.Debug("sending query")
|
||||
traversePath = append(traversePath, child.Index)
|
||||
var err error
|
||||
rtrav, err = queryNext(
|
||||
@ -1168,7 +1238,7 @@ func (s *streamManager) walk(
|
||||
}
|
||||
|
||||
if rtrav == nil {
|
||||
s.logger.Info("traversal could not reach path")
|
||||
// s.logger.Debug("traversal could not reach path")
|
||||
if isServer {
|
||||
err := s.sendLeafData(
|
||||
lpref,
|
||||
@ -1181,7 +1251,7 @@ func (s *streamManager) walk(
|
||||
}
|
||||
}
|
||||
}
|
||||
s.logger.Info("traversal completed, performing walk", pathString)
|
||||
// s.logger.Debug("traversal completed, performing walk", pathString)
|
||||
return s.walk(
|
||||
path,
|
||||
lnode,
|
||||
@ -1193,19 +1263,19 @@ func (s *streamManager) walk(
|
||||
isServer,
|
||||
)
|
||||
} else {
|
||||
s.logger.Info("remote prefix longer, traversing local to path", pathString)
|
||||
// s.logger.Debug("remote prefix longer, traversing local to path", pathString)
|
||||
traverse := rpref[len(lpref)-1:]
|
||||
ltrav := lnode
|
||||
traversedPath := append([]int32{}, lnode.Path...)
|
||||
|
||||
for _, nibble := range traverse {
|
||||
s.logger.Info("attempting local traversal step")
|
||||
// s.logger.Debug("attempting local traversal step")
|
||||
preTraversal := append([]int32{}, traversedPath...)
|
||||
for _, child := range ltrav.Children {
|
||||
if child.Index == nibble {
|
||||
traversedPath = append(traversedPath, nibble)
|
||||
var err error
|
||||
s.logger.Info("expecting query")
|
||||
// s.logger.Debug("expecting query")
|
||||
ltrav, err = handleQueryNext(
|
||||
s.logger,
|
||||
s.ctx,
|
||||
@ -1220,7 +1290,7 @@ func (s *streamManager) walk(
|
||||
}
|
||||
|
||||
if ltrav == nil {
|
||||
s.logger.Info("traversal could not reach path")
|
||||
// s.logger.Debug("traversal could not reach path")
|
||||
if isServer {
|
||||
err := s.sendLeafData(
|
||||
preTraversal,
|
||||
@ -1233,17 +1303,17 @@ func (s *streamManager) walk(
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.logger.Info(
|
||||
"known missing branch",
|
||||
zap.String(
|
||||
"path",
|
||||
hex.EncodeToString(
|
||||
packPath(
|
||||
append(append([]int32{}, preTraversal...), child.Index),
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
// s.logger.Debug(
|
||||
// "known missing branch",
|
||||
// zap.String(
|
||||
// "path",
|
||||
// hex.EncodeToString(
|
||||
// packPath(
|
||||
// append(append([]int32{}, preTraversal...), child.Index),
|
||||
// ),
|
||||
// ),
|
||||
// ),
|
||||
// )
|
||||
if isServer {
|
||||
if err := s.sendLeafData(
|
||||
append(append([]int32{}, preTraversal...), child.Index),
|
||||
@ -1260,7 +1330,7 @@ func (s *streamManager) walk(
|
||||
}
|
||||
}
|
||||
}
|
||||
s.logger.Info("traversal completed, performing walk", pathString)
|
||||
// s.logger.Debug("traversal completed, performing walk", pathString)
|
||||
return s.walk(
|
||||
path,
|
||||
ltrav,
|
||||
@ -1274,13 +1344,13 @@ func (s *streamManager) walk(
|
||||
}
|
||||
} else {
|
||||
if slices.Compare(lpref, rpref) == 0 {
|
||||
s.logger.Debug("prefixes match, diffing children")
|
||||
// s.logger.Debug("prefixes match, diffing children")
|
||||
for i := int32(0); i < 64; i++ {
|
||||
s.logger.Debug("checking branch", zap.Int32("branch", i))
|
||||
// s.logger.Debug("checking branch", zap.Int32("branch", i))
|
||||
var lchild *protobufs.BranchChild = nil
|
||||
for _, lc := range lnode.Children {
|
||||
if lc.Index == i {
|
||||
s.logger.Debug("local instance found", zap.Int32("branch", i))
|
||||
// s.logger.Debug("local instance found", zap.Int32("branch", i))
|
||||
|
||||
lchild = lc
|
||||
break
|
||||
@ -1289,7 +1359,7 @@ func (s *streamManager) walk(
|
||||
var rchild *protobufs.BranchChild = nil
|
||||
for _, rc := range rnode.Children {
|
||||
if rc.Index == i {
|
||||
s.logger.Debug("remote instance found", zap.Int32("branch", i))
|
||||
// s.logger.Debug("remote instance found", zap.Int32("branch", i))
|
||||
|
||||
rchild = rc
|
||||
break
|
||||
@ -1335,7 +1405,7 @@ func (s *streamManager) walk(
|
||||
nextPath,
|
||||
)
|
||||
if err != nil {
|
||||
s.logger.Info("incomplete branch descension", zap.Error(err))
|
||||
// s.logger.Debug("incomplete branch descension", zap.Error(err))
|
||||
if isServer {
|
||||
if err := s.sendLeafData(
|
||||
nextPath,
|
||||
@ -1368,7 +1438,7 @@ func (s *streamManager) walk(
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.logger.Info("prefix mismatch on both sides", pathString)
|
||||
// s.logger.Debug("prefix mismatch on both sides", pathString)
|
||||
if isServer {
|
||||
if err := s.sendLeafData(
|
||||
path,
|
||||
@ -1393,7 +1463,18 @@ func (s *streamManager) walk(
|
||||
// and queues further queries as differences are detected.
|
||||
func (hg *HypergraphCRDT) syncTreeServer(
|
||||
stream protobufs.HypergraphComparisonService_HyperStreamServer,
|
||||
snapshotStore tries.TreeBackingStore,
|
||||
snapshotRoot []byte,
|
||||
) error {
|
||||
if len(snapshotRoot) != 0 {
|
||||
hg.logger.Info(
|
||||
"syncing with snapshot",
|
||||
zap.String("root", hex.EncodeToString(snapshotRoot)),
|
||||
)
|
||||
} else {
|
||||
hg.logger.Info("syncing with snapshot", zap.String("root", ""))
|
||||
}
|
||||
|
||||
msg, err := stream.Recv()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1414,17 +1495,9 @@ func (hg *HypergraphCRDT) syncTreeServer(
|
||||
L2: [32]byte(query.ShardKey[3:]),
|
||||
}
|
||||
|
||||
// Get the appropriate id set
|
||||
var idSet hypergraph.IdSet
|
||||
switch query.PhaseSet {
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS:
|
||||
idSet = hg.getVertexAddsSet(shardKey)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_REMOVES:
|
||||
idSet = hg.getVertexRemovesSet(shardKey)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_ADDS:
|
||||
idSet = hg.getHyperedgeAddsSet(shardKey)
|
||||
case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_REMOVES:
|
||||
idSet = hg.getHyperedgeRemovesSet(shardKey)
|
||||
idSet := hg.snapshotPhaseSet(shardKey, query.PhaseSet, snapshotStore)
|
||||
if idSet == nil {
|
||||
return errors.New("unsupported phase set")
|
||||
}
|
||||
|
||||
branchInfo, err := getBranchInfoFromTree(
|
||||
@ -1436,12 +1509,12 @@ func (hg *HypergraphCRDT) syncTreeServer(
|
||||
return err
|
||||
}
|
||||
|
||||
hg.logger.Debug(
|
||||
"returning branch info",
|
||||
zap.String("commitment", hex.EncodeToString(branchInfo.Commitment)),
|
||||
zap.Int("children", len(branchInfo.Children)),
|
||||
zap.Int("path", len(branchInfo.Path)),
|
||||
)
|
||||
// hg.logger.Debug(
|
||||
// "returning branch info",
|
||||
// zap.String("commitment", hex.EncodeToString(branchInfo.Commitment)),
|
||||
// zap.Int("children", len(branchInfo.Children)),
|
||||
// zap.Int("path", len(branchInfo.Path)),
|
||||
// )
|
||||
|
||||
resp := &protobufs.HypergraphComparison{
|
||||
Payload: &protobufs.HypergraphComparison_Response{
|
||||
@ -1486,14 +1559,14 @@ func (hg *HypergraphCRDT) syncTreeServer(
|
||||
for {
|
||||
msg, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
hg.logger.Info("received disconnect")
|
||||
hg.logger.Info("server stream recv eof")
|
||||
cancel()
|
||||
close(incomingQueriesIn)
|
||||
close(incomingResponsesIn)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
hg.logger.Info("received error", zap.Error(err))
|
||||
hg.logger.Info("server stream recv error", zap.Error(err))
|
||||
cancel()
|
||||
close(incomingQueriesIn)
|
||||
close(incomingResponsesIn)
|
||||
@ -1527,7 +1600,7 @@ func (hg *HypergraphCRDT) syncTreeServer(
|
||||
cancel: cancel,
|
||||
logger: hg.logger,
|
||||
stream: stream,
|
||||
hypergraphStore: hg.store,
|
||||
hypergraphStore: snapshotStore,
|
||||
localTree: idSet.GetTree(),
|
||||
lastSent: time.Now(),
|
||||
}
|
||||
|
||||
@ -1596,7 +1596,9 @@ func (e *GlobalConsensusEngine) materialize(
|
||||
}
|
||||
|
||||
if e.verifyProverRoot(frameNumber, expectedProverRoot, proposer) {
|
||||
e.reconcileLocalWorkerAllocations()
|
||||
if !e.config.Engine.ArchiveMode || e.config.P2P.Network == 99 {
|
||||
e.reconcileLocalWorkerAllocations()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -1664,10 +1666,9 @@ func (e *GlobalConsensusEngine) triggerProverHypersync(proposer []byte) {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go func() {
|
||||
defer e.proverSyncInProgress.Store(false)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
shardKey := tries.ShardKey{
|
||||
L1: [3]byte{0x00, 0x00, 0x00},
|
||||
@ -1680,6 +1681,15 @@ func (e *GlobalConsensusEngine) triggerProverHypersync(proposer []byte) {
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case <-e.ShutdownSignal():
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
@ -3145,59 +3155,53 @@ func (e *GlobalConsensusEngine) DecideWorkerJoins(
|
||||
}
|
||||
|
||||
if len(reject) != 0 {
|
||||
for _, r := range reject {
|
||||
rejectMessage, err := global.NewProverReject(
|
||||
r,
|
||||
frame.Header.FrameNumber,
|
||||
e.keyManager,
|
||||
e.hypergraph,
|
||||
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, e.inclusionProver),
|
||||
)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct reject", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
err = rejectMessage.Prove(frame.Header.FrameNumber)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct reject", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
bundle.Requests = append(bundle.Requests, &protobufs.MessageRequest{
|
||||
Request: &protobufs.MessageRequest_Reject{
|
||||
Reject: rejectMessage.ToProtobuf(),
|
||||
},
|
||||
})
|
||||
rejectMessage, err := global.NewProverReject(
|
||||
reject,
|
||||
frame.Header.FrameNumber,
|
||||
e.keyManager,
|
||||
e.hypergraph,
|
||||
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, e.inclusionProver),
|
||||
)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct reject", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
}
|
||||
|
||||
if len(confirm) != 0 {
|
||||
for _, r := range confirm {
|
||||
confirmMessage, err := global.NewProverConfirm(
|
||||
r,
|
||||
frame.Header.FrameNumber,
|
||||
e.keyManager,
|
||||
e.hypergraph,
|
||||
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, e.inclusionProver),
|
||||
)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct confirm", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
err = confirmMessage.Prove(frame.Header.FrameNumber)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct confirm", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
bundle.Requests = append(bundle.Requests, &protobufs.MessageRequest{
|
||||
Request: &protobufs.MessageRequest_Confirm{
|
||||
Confirm: confirmMessage.ToProtobuf(),
|
||||
},
|
||||
})
|
||||
err = rejectMessage.Prove(frame.Header.FrameNumber)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct reject", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
bundle.Requests = append(bundle.Requests, &protobufs.MessageRequest{
|
||||
Request: &protobufs.MessageRequest_Reject{
|
||||
Reject: rejectMessage.ToProtobuf(),
|
||||
},
|
||||
})
|
||||
} else if len(confirm) != 0 {
|
||||
confirmMessage, err := global.NewProverConfirm(
|
||||
confirm,
|
||||
frame.Header.FrameNumber,
|
||||
e.keyManager,
|
||||
e.hypergraph,
|
||||
schema.NewRDFMultiprover(&schema.TurtleRDFParser{}, e.inclusionProver),
|
||||
)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct confirm", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
err = confirmMessage.Prove(frame.Header.FrameNumber)
|
||||
if err != nil {
|
||||
e.logger.Error("could not construct confirm", zap.Error(err))
|
||||
return errors.Wrap(err, "decide worker joins")
|
||||
}
|
||||
|
||||
bundle.Requests = append(bundle.Requests, &protobufs.MessageRequest{
|
||||
Request: &protobufs.MessageRequest_Confirm{
|
||||
Confirm: confirmMessage.ToProtobuf(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
bundle.Timestamp = time.Now().UnixMilli()
|
||||
|
||||
@ -145,7 +145,7 @@ func (p *globalMessageProcessor) enforceCollectorLimit(
|
||||
|
||||
if len(collector.Records()) >= maxGlobalMessagesPerFrame {
|
||||
collector.Remove(record)
|
||||
p.engine.deferGlobalMessage(record.sequence+1, record.payload)
|
||||
// p.engine.deferGlobalMessage(record.sequence+1, record.payload)
|
||||
return keyedcollector.NewInvalidRecordError(
|
||||
record,
|
||||
fmt.Errorf("message limit reached for frame %d", p.sequence),
|
||||
@ -215,10 +215,51 @@ func (e *GlobalConsensusEngine) startGlobalMessageAggregator(
|
||||
}
|
||||
|
||||
func (e *GlobalConsensusEngine) addGlobalMessage(data []byte) {
|
||||
if e.messageAggregator == nil {
|
||||
if e.messageAggregator == nil || len(data) == 0 {
|
||||
return
|
||||
}
|
||||
record := newSequencedGlobalMessage(e.currentRank+1, data)
|
||||
|
||||
payload := data
|
||||
if len(data) >= 4 {
|
||||
typePrefix := binary.BigEndian.Uint32(data[:4])
|
||||
if typePrefix == protobufs.MessageBundleType {
|
||||
bundle := &protobufs.MessageBundle{}
|
||||
if err := bundle.FromCanonicalBytes(data); err != nil {
|
||||
if e.logger != nil {
|
||||
e.logger.Debug(
|
||||
"failed to decode message bundle for collector",
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(bundle.Requests) > maxGlobalMessagesPerFrame {
|
||||
if e.logger != nil {
|
||||
e.logger.Debug(
|
||||
"truncating message bundle requests for collector",
|
||||
zap.Int("original", len(bundle.Requests)),
|
||||
zap.Int("limit", maxGlobalMessagesPerFrame),
|
||||
)
|
||||
}
|
||||
bundle.Requests = bundle.Requests[:maxGlobalMessagesPerFrame]
|
||||
}
|
||||
|
||||
encoded, err := bundle.ToCanonicalBytes()
|
||||
if err != nil {
|
||||
if e.logger != nil {
|
||||
e.logger.Debug(
|
||||
"failed to re-encode message bundle for collector",
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
payload = encoded
|
||||
}
|
||||
}
|
||||
|
||||
record := newSequencedGlobalMessage(e.currentRank+1, payload)
|
||||
e.messageAggregator.Add(record)
|
||||
}
|
||||
|
||||
|
||||
@ -316,6 +316,13 @@ func (e *GlobalConsensusEngine) validateProverMessage(
|
||||
return tp2p.ValidationResultReject
|
||||
}
|
||||
|
||||
if e.currentRank < 14400 {
|
||||
for _, r := range messageBundle.Requests {
|
||||
if r.GetKick() != nil {
|
||||
return tp2p.ValidationResultIgnore
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := messageBundle.Validate(); err != nil {
|
||||
e.logger.Debug("invalid request", zap.Error(err))
|
||||
return tp2p.ValidationResultReject
|
||||
|
||||
@ -330,6 +330,13 @@ func (m *Manager) scoreShards(
|
||||
for j := uint8(0); j < s.Ring+1; j++ {
|
||||
divisor <<= 1
|
||||
}
|
||||
|
||||
// shard is oversubscribed, treat as no rewards
|
||||
if divisor == 0 {
|
||||
scores = append(scores, scored{idx: i, score: big.NewInt(0)})
|
||||
continue
|
||||
}
|
||||
|
||||
ringDiv := decimal.NewFromInt(divisor)
|
||||
|
||||
// shard factor = sqrt(Shards)
|
||||
@ -344,9 +351,6 @@ func (m *Manager) scoreShards(
|
||||
if shardsSqrt.IsZero() {
|
||||
return nil, errors.New("score shards")
|
||||
}
|
||||
if ringDiv.IsZero() {
|
||||
return nil, errors.New("score shards")
|
||||
}
|
||||
|
||||
factor = factor.Div(ringDiv)
|
||||
factor = factor.Div(shardsSqrt)
|
||||
|
||||
@ -76,7 +76,12 @@ func (p *OptimizedProofOfMeaningfulWorkRewardIssuance) Calculate(
|
||||
// Divide by 2^s
|
||||
divisor := int64(1)
|
||||
for i := uint8(0); i < alloc.Ring+1; i++ {
|
||||
divisor *= 2
|
||||
divisor <<= 1
|
||||
}
|
||||
|
||||
// shard is oversubscribed, treat as no rewards
|
||||
if divisor == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
ringScaled := decimal.NewFromInt(divisor)
|
||||
@ -207,7 +212,11 @@ func (p *MinimalAllocationOptimizedProofOfMeaningfulWorkRewardIssuance) Calculat
|
||||
factor = factor.Div(worldStateBytesDecimal)
|
||||
|
||||
result := factor.Mul(basisDecimal)
|
||||
result = result.Div(ringDivisors[alloc.Ring+1])
|
||||
divisor := ringDivisors[alloc.Ring+1]
|
||||
if divisor.IsZero() {
|
||||
continue
|
||||
}
|
||||
result = result.Div(divisor)
|
||||
|
||||
p.cacheLock.RLock()
|
||||
shardFactor := p.sqrtCache[alloc.Shards]
|
||||
|
||||
@ -37,7 +37,12 @@ func (p *ProofOfMeaningfulWorkRewardIssuance) Calculate(
|
||||
// Divide by 2^s
|
||||
divisor := int64(1)
|
||||
for i := uint8(0); i < alloc.Ring+1; i++ {
|
||||
divisor *= 2
|
||||
divisor <<= 1
|
||||
}
|
||||
|
||||
// shard is oversubscribed, treat as no rewards
|
||||
if divisor == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
ringScaled := decimal.NewFromInt(divisor)
|
||||
|
||||
@ -381,16 +381,23 @@ func (p *SyncProvider[StateT, ProposalT]) HyperSync(
|
||||
return
|
||||
}
|
||||
|
||||
phaseSyncs := []func(
|
||||
protobufs.HypergraphComparisonService_HyperStreamClient,
|
||||
tries.ShardKey,
|
||||
){
|
||||
p.hyperSyncVertexAdds,
|
||||
p.hyperSyncVertexRemoves,
|
||||
p.hyperSyncHyperedgeAdds,
|
||||
p.hyperSyncHyperedgeRemoves,
|
||||
}
|
||||
|
||||
for _, reachability := range info.Reachability {
|
||||
if !bytes.Equal(reachability.Filter, p.filter) {
|
||||
continue
|
||||
}
|
||||
for _, s := range reachability.StreamMultiaddrs {
|
||||
if err == nil {
|
||||
ch, err := p.getDirectChannel(
|
||||
[]byte(peerId),
|
||||
s,
|
||||
)
|
||||
for _, syncPhase := range phaseSyncs {
|
||||
ch, err := p.getDirectChannel([]byte(peerId), s)
|
||||
if err != nil {
|
||||
p.logger.Debug(
|
||||
"could not establish direct channel, trying next multiaddr",
|
||||
@ -401,18 +408,17 @@ func (p *SyncProvider[StateT, ProposalT]) HyperSync(
|
||||
continue
|
||||
}
|
||||
|
||||
defer ch.Close()
|
||||
client := protobufs.NewHypergraphComparisonServiceClient(ch)
|
||||
str, err := client.HyperStream(ctx)
|
||||
if err != nil {
|
||||
p.logger.Error("error from sync", zap.Error(err))
|
||||
} else {
|
||||
p.hyperSyncVertexAdds(str, shardKey)
|
||||
p.hyperSyncVertexRemoves(str, shardKey)
|
||||
p.hyperSyncHyperedgeAdds(str, shardKey)
|
||||
p.hyperSyncHyperedgeRemoves(str, shardKey)
|
||||
return
|
||||
}
|
||||
|
||||
syncPhase(str, shardKey)
|
||||
if cerr := ch.Close(); cerr != nil {
|
||||
p.logger.Error("error while closing connection", zap.Error(cerr))
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
|
||||
@ -499,7 +499,7 @@ func createProverResumePayload(t *testing.T) *protobufs.MessageRequest {
|
||||
|
||||
func createProverConfirmPayload(t *testing.T) *protobufs.MessageRequest {
|
||||
confirm := &global.ProverConfirm{
|
||||
Filter: []byte("filter1filter1filter1filter1filter1"),
|
||||
Filters: [][]byte{[]byte("filter1filter1filter1filter1filter1")},
|
||||
FrameNumber: 1,
|
||||
PublicKeySignatureBLS48581: global.BLS48581AddressedSignature{
|
||||
Address: make([]byte, 32),
|
||||
@ -519,7 +519,7 @@ func createProverConfirmPayload(t *testing.T) *protobufs.MessageRequest {
|
||||
|
||||
func createProverRejectPayload(t *testing.T) *protobufs.MessageRequest {
|
||||
reject := &global.ProverReject{
|
||||
Filter: []byte("filter1filter1filter1filter1filter1"),
|
||||
Filters: [][]byte{[]byte("filter1filter1filter1filter1filter1")},
|
||||
FrameNumber: 1,
|
||||
PublicKeySignatureBLS48581: global.BLS48581AddressedSignature{
|
||||
Address: make([]byte, 32),
|
||||
|
||||
@ -1,6 +1,8 @@
|
||||
package global
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"source.quilibrium.com/quilibrium/monorepo/protobufs"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
@ -362,8 +364,21 @@ func ProverConfirmFromProtobuf(
|
||||
return nil, errors.Wrap(err, "converting public key signature")
|
||||
}
|
||||
|
||||
filters := [][]byte{}
|
||||
if len(pb.Filters) > 0 {
|
||||
filters = pb.Filters
|
||||
} else {
|
||||
if bytes.Equal(pb.Filter, bytes.Repeat([]byte("reserved"), 4)) {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("filter cannot be reserved"),
|
||||
"invalid prover confirm",
|
||||
)
|
||||
}
|
||||
filters = append(filters, pb.Filter)
|
||||
}
|
||||
|
||||
return &ProverConfirm{
|
||||
Filter: pb.Filter,
|
||||
Filters: filters,
|
||||
FrameNumber: pb.FrameNumber,
|
||||
PublicKeySignatureBLS48581: *pubKeySig,
|
||||
hypergraph: hg,
|
||||
@ -379,7 +394,7 @@ func (p *ProverConfirm) ToProtobuf() *protobufs.ProverConfirm {
|
||||
}
|
||||
|
||||
return &protobufs.ProverConfirm{
|
||||
Filter: p.Filter,
|
||||
Filters: p.Filters,
|
||||
FrameNumber: p.FrameNumber,
|
||||
PublicKeySignatureBls48581: p.PublicKeySignatureBLS48581.ToProtobuf(),
|
||||
}
|
||||
@ -405,8 +420,21 @@ func ProverRejectFromProtobuf(
|
||||
return nil, errors.Wrap(err, "converting public key signature")
|
||||
}
|
||||
|
||||
filters := [][]byte{}
|
||||
if len(pb.Filters) > 0 {
|
||||
filters = pb.Filters
|
||||
} else {
|
||||
if bytes.Equal(pb.Filter, bytes.Repeat([]byte("reserved"), 4)) {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("filter cannot be reserved"),
|
||||
"invalid prover confirm",
|
||||
)
|
||||
}
|
||||
filters = append(filters, pb.Filter)
|
||||
}
|
||||
|
||||
return &ProverReject{
|
||||
Filter: pb.Filter,
|
||||
Filters: filters,
|
||||
FrameNumber: pb.FrameNumber,
|
||||
PublicKeySignatureBLS48581: *pubKeySig,
|
||||
hypergraph: hg,
|
||||
@ -422,7 +450,7 @@ func (p *ProverReject) ToProtobuf() *protobufs.ProverReject {
|
||||
}
|
||||
|
||||
return &protobufs.ProverReject{
|
||||
Filter: p.Filter,
|
||||
Filters: p.Filters,
|
||||
FrameNumber: p.FrameNumber,
|
||||
PublicKeySignatureBls48581: p.PublicKeySignatureBLS48581.ToProtobuf(),
|
||||
}
|
||||
|
||||
@ -28,8 +28,8 @@ type BLS48581AddressedSignature struct {
|
||||
}
|
||||
|
||||
type ProverConfirm struct {
|
||||
// The filter representing the confirm request
|
||||
Filter []byte
|
||||
// The filters representing the confirm request
|
||||
Filters [][]byte
|
||||
// The frame number when this request is made
|
||||
FrameNumber uint64
|
||||
// The BLS48581 addressed signature
|
||||
@ -42,14 +42,14 @@ type ProverConfirm struct {
|
||||
}
|
||||
|
||||
func NewProverConfirm(
|
||||
filter []byte,
|
||||
filters [][]byte,
|
||||
frameNumber uint64,
|
||||
keyManager keys.KeyManager,
|
||||
hypergraph hypergraph.Hypergraph,
|
||||
rdfMultiprover *schema.RDFMultiprover,
|
||||
) (*ProverConfirm, error) {
|
||||
return &ProverConfirm{
|
||||
Filter: filter,
|
||||
Filters: filters,
|
||||
FrameNumber: frameNumber,
|
||||
keyManager: keyManager,
|
||||
hypergraph: hypergraph,
|
||||
@ -110,157 +110,159 @@ func (p *ProverConfirm) Materialize(
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, p.Filter),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
// Get allocation vertex
|
||||
allocationVertex, err := hg.Get(
|
||||
allocationFullAddress[:32],
|
||||
allocationFullAddress[32:],
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
if err != nil || allocationVertex == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"materialize",
|
||||
for _, filter := range p.Filters {
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, filter),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
var allocationTree *tries.VectorCommitmentTree
|
||||
allocationTree, ok = allocationVertex.(*tries.VectorCommitmentTree)
|
||||
if !ok || allocationTree == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("invalid object returned for vertex"),
|
||||
"materialize",
|
||||
// Get allocation vertex
|
||||
allocationVertex, err := hg.Get(
|
||||
allocationFullAddress[:32],
|
||||
allocationFullAddress[32:],
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
}
|
||||
if err != nil || allocationVertex == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"materialize",
|
||||
)
|
||||
}
|
||||
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
var allocationTree *tries.VectorCommitmentTree
|
||||
allocationTree, ok = allocationVertex.(*tries.VectorCommitmentTree)
|
||||
if !ok || allocationTree == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("invalid object returned for vertex"),
|
||||
"materialize",
|
||||
)
|
||||
}
|
||||
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Determine what we're confirming based on current status
|
||||
if status == 0 {
|
||||
// Confirming join - update allocation status to active (1)
|
||||
err = p.rdfMultiprover.Set(
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{1},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Set active frame to current
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Determine what we're confirming based on current status
|
||||
if status == 0 {
|
||||
// Confirming join - update allocation status to active (1)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{1},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Set active frame to current
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"LastActiveFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store join confirmation frame number
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"JoinConfirmFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Confirming leave - update allocation status to left (4)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{4},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store leave confirmation frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveConfirmFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
}
|
||||
|
||||
// Get a copy of the original allocation tree for change tracking
|
||||
var prior *tries.VectorCommitmentTree
|
||||
originalAllocationVertex, err := hg.Get(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"LastActiveFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
allocationAddress,
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
if err == nil && originalAllocationVertex != nil {
|
||||
prior = originalAllocationVertex.(*tries.VectorCommitmentTree)
|
||||
}
|
||||
|
||||
// Store join confirmation frame number
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
// Create allocation vertex
|
||||
allocationVertexState := hg.NewVertexAddMaterializedState(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
|
||||
[32]byte(allocationAddress),
|
||||
frameNumber,
|
||||
prior,
|
||||
allocationTree,
|
||||
)
|
||||
|
||||
err = hg.Set(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"JoinConfirmFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
allocationAddress,
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
frameNumber,
|
||||
allocationVertexState,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Confirming leave - update allocation status to left (4)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{4},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store leave confirmation frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveConfirmFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
}
|
||||
|
||||
// Get a copy of the original allocation tree for change tracking
|
||||
var prior *tries.VectorCommitmentTree
|
||||
originalAllocationVertex, err := hg.Get(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
allocationAddress,
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
if err == nil && originalAllocationVertex != nil {
|
||||
prior = originalAllocationVertex.(*tries.VectorCommitmentTree)
|
||||
}
|
||||
|
||||
// Create allocation vertex
|
||||
allocationVertexState := hg.NewVertexAddMaterializedState(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
|
||||
[32]byte(allocationAddress),
|
||||
frameNumber,
|
||||
prior,
|
||||
allocationTree,
|
||||
)
|
||||
|
||||
err = hg.Set(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
allocationAddress,
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
frameNumber,
|
||||
allocationVertexState,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Update the prover status to reflect the aggregate allocation status
|
||||
@ -300,7 +302,7 @@ func (p *ProverConfirm) Prove(frameNumber uint64) error {
|
||||
confirmMessage := bytes.Buffer{}
|
||||
|
||||
// Add filter
|
||||
confirmMessage.Write(p.Filter)
|
||||
confirmMessage.Write(slices.Concat(p.Filters...))
|
||||
|
||||
// Add frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
@ -365,26 +367,28 @@ func (p *ProverConfirm) GetWriteAddresses(frameNumber uint64) ([][]byte, error)
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, p.Filter),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
result := [][]byte{}
|
||||
addresses := map[string]struct{}{}
|
||||
addresses[string(proverFullAddress[:])] = struct{}{}
|
||||
addresses[string(allocationFullAddress[:])] = struct{}{}
|
||||
for _, filter := range p.Filters {
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, filter),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
result := [][]byte{}
|
||||
for key := range addresses {
|
||||
result = append(result, []byte(key))
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
addresses[string(allocationFullAddress[:])] = struct{}{}
|
||||
|
||||
for key := range addresses {
|
||||
result = append(result, []byte(key))
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
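
For readers skimming the hunk above: each entry in Filters now yields its own allocation address, and the write set is deduplicated through a map keyed by the full 64-byte address alongside the prover's own address. The following is a minimal standalone sketch of that derivation, not the committed implementation; the package name, helper name, poseidon import path, and the globalAddress/proverFullAddress parameters are assumptions made for illustration.

```go
package sketch

import (
	"slices"

	// Import path assumed; the monorepo may vendor its own poseidon package.
	"github.com/iden3/go-iden3-crypto/poseidon"
)

// writeAddressesForFilters mirrors the shape of GetWriteAddresses above:
// one allocation address per filter, deduplicated together with the
// prover's own full address.
func writeAddressesForFilters(
	globalAddress [32]byte,
	proverFullAddress [64]byte,
	publicKey []byte,
	filters [][]byte,
) ([][]byte, error) {
	addresses := map[string]struct{}{
		string(proverFullAddress[:]): {},
	}

	for _, filter := range filters {
		// Allocation address = Poseidon("PROVER_ALLOCATION" || publicKey || filter).
		bi, err := poseidon.HashBytes(
			slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, filter),
		)
		if err != nil {
			return nil, err
		}

		full := [64]byte{}
		copy(full[:32], globalAddress[:])
		copy(full[32:], bi.FillBytes(make([]byte, 32)))
		addresses[string(full[:])] = struct{}{}
	}

	result := make([][]byte, 0, len(addresses))
	for key := range addresses {
		result = append(result, []byte(key))
	}
	return result, nil
}
```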
@ -396,7 +400,7 @@ func (p *ProverConfirm) Verify(frameNumber uint64) (bool, error) {
|
||||
confirmMessage := bytes.Buffer{}
|
||||
|
||||
// Add filter
|
||||
confirmMessage.Write(p.Filter)
|
||||
confirmMessage.Write(slices.Concat(p.Filters...))
|
||||
|
||||
// Add frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
@ -439,133 +443,135 @@ func (p *ProverConfirm) Verify(frameNumber uint64) (bool, error) {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
// Calculate allocation address to verify it exists
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), pubkey, p.Filter),
|
||||
)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
// Get allocation vertex
|
||||
allocationTree, err := p.hypergraph.GetVertexData(allocationFullAddress)
|
||||
if err != nil || allocationTree == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"verify",
|
||||
for _, filter := range p.Filters {
|
||||
// Calculate allocation address to verify it exists
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), pubkey, filter),
|
||||
)
|
||||
}
|
||||
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Can only confirm if allocation is in joining (0) or leaving (3) state
|
||||
if status != 0 && status != 3 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("invalid allocation state for confirmation"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
if status == 0 {
|
||||
// Confirming join
|
||||
// Get join frame number
|
||||
joinFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"JoinFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(joinFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing join frame"), "verify")
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
joinFrame := binary.BigEndian.Uint64(joinFrameBytes)
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
// Check timing constraints
|
||||
if joinFrame < token.FRAME_2_1_EXTENDED_ENROLL_END && joinFrame >= 244100 {
|
||||
if frameNumber < token.FRAME_2_1_EXTENDED_ENROLL_END {
|
||||
// If joined before frame 255840, cannot confirm until frame 255840
|
||||
return false, errors.Wrap(
|
||||
errors.New("cannot confirm before frame 255840"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Set this to either 255840 - 360 or the raw join frame if higher than it
// so the provers before can immediately join after the wait; those after
// still have the full 360.
|
||||
if joinFrame < (token.FRAME_2_1_EXTENDED_ENROLL_END - 360) {
|
||||
joinFrame = (token.FRAME_2_1_EXTENDED_ENROLL_END - 360)
|
||||
}
|
||||
}
|
||||
|
||||
// For joins before 255840, once we reach frame 255840, they can confirm
// immediately; for joins after 255840, the normal 360 frame wait applies.
// If the join frame precedes the genesis frame (e.g. not mainnet), we
// ignore the check altogether.
|
||||
if joinFrame >= (token.FRAME_2_1_EXTENDED_ENROLL_END-360) ||
|
||||
joinFrame <= 244100 {
|
||||
framesSinceJoin := frameNumber - joinFrame
|
||||
if framesSinceJoin < 360 {
|
||||
return false, errors.Wrap(
|
||||
fmt.Errorf(
|
||||
"must wait 360 frames after join to confirm (%d)",
|
||||
framesSinceJoin,
|
||||
),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if framesSinceJoin > 720 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("confirmation window expired (720 frames)"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Confirming leave
|
||||
// Get leave frame number
|
||||
leaveFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(leaveFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing leave frame"), "verify")
|
||||
}
|
||||
leaveFrame := binary.BigEndian.Uint64(leaveFrameBytes)
|
||||
|
||||
framesSinceLeave := frameNumber - leaveFrame
|
||||
if framesSinceLeave < 360 {
|
||||
// Get allocation vertex
|
||||
allocationTree, err := p.hypergraph.GetVertexData(allocationFullAddress)
|
||||
if err != nil || allocationTree == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("must wait 360 frames after leave to confirm"),
|
||||
errors.New("allocation not found"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if framesSinceLeave > 720 {
|
||||
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Can only confirm if allocation is in joining (0) or leaving (3) state
|
||||
if status != 0 && status != 3 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("leave confirmation window expired (720 frames)"),
|
||||
errors.New("invalid allocation state for confirmation"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
if status == 0 {
|
||||
// Confirming join
|
||||
// Get join frame number
|
||||
joinFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"JoinFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(joinFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing join frame"), "verify")
|
||||
}
|
||||
joinFrame := binary.BigEndian.Uint64(joinFrameBytes)
|
||||
|
||||
// Check timing constraints
|
||||
if joinFrame < token.FRAME_2_1_EXTENDED_ENROLL_END && joinFrame >= 244100 {
|
||||
if frameNumber < token.FRAME_2_1_EXTENDED_ENROLL_END {
|
||||
// If joined before frame 255840, cannot confirm until frame 255840
|
||||
return false, errors.Wrap(
|
||||
errors.New("cannot confirm before frame 255840"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
// Set this to either 255840 - 360 or the raw join frame if higher than
// it so the provers before can immediately join after the wait; those
// after still have the full 360.
|
||||
if joinFrame < (token.FRAME_2_1_EXTENDED_ENROLL_END - 360) {
|
||||
joinFrame = (token.FRAME_2_1_EXTENDED_ENROLL_END - 360)
|
||||
}
|
||||
}
|
||||
|
||||
// For joins before 255840, once we reach frame 255840, they can confirm
// immediately; for joins after 255840, the normal 360 frame wait applies.
// If the join frame precedes the genesis frame (e.g. not mainnet), we
// ignore the check altogether.
|
||||
if joinFrame >= (token.FRAME_2_1_EXTENDED_ENROLL_END-360) ||
|
||||
joinFrame <= 244100 {
|
||||
framesSinceJoin := frameNumber - joinFrame
|
||||
if framesSinceJoin < 360 {
|
||||
return false, errors.Wrap(
|
||||
fmt.Errorf(
|
||||
"must wait 360 frames after join to confirm (%d)",
|
||||
framesSinceJoin,
|
||||
),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if framesSinceJoin > 720 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("confirmation window expired (720 frames)"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Confirming leave
|
||||
// Get leave frame number
|
||||
leaveFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(leaveFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing leave frame"), "verify")
|
||||
}
|
||||
leaveFrame := binary.BigEndian.Uint64(leaveFrameBytes)
|
||||
|
||||
framesSinceLeave := frameNumber - leaveFrame
|
||||
if framesSinceLeave < 360 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("must wait 360 frames after leave to confirm"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if framesSinceLeave > 720 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("leave confirmation window expired (720 frames)"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
}
|
||||
}
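
The per-filter checks above gate confirmations on a 360-frame wait and a 720-frame expiry after the originating join or leave frame (with the frame-255840 extended-enroll carve-out handled separately). Below is a minimal sketch of that shared window rule, assuming the origin frame has already been decoded; the constants mirror the hunk above, and the helper name and package are invented for illustration.

```go
package sketch

import "errors"

// confirmWindowOpen sketches the timing rule used above: a confirmation is
// only accepted between 360 and 720 frames after the originating frame.
// Illustrative only; the real code inlines these checks per allocation status.
func confirmWindowOpen(frameNumber, originFrame uint64) error {
	if frameNumber < originFrame {
		return errors.New("confirmation frame precedes origin frame")
	}
	elapsed := frameNumber - originFrame
	if elapsed < 360 {
		return errors.New("must wait 360 frames to confirm")
	}
	if elapsed > 720 {
		return errors.New("confirmation window expired (720 frames)")
	}
	return nil
}
```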
|
||||
// Verify the signature
|
||||
|
||||
@ -121,7 +121,7 @@ func TestProverConfirm_Prove(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Call the prove function
|
||||
@ -172,7 +172,7 @@ func TestProverConfirm_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -244,7 +244,7 @@ func TestProverConfirm_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -293,7 +293,7 @@ func TestProverConfirm_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -342,7 +342,7 @@ func TestProverConfirm_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, confirmFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -480,7 +480,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data
|
||||
@ -588,7 +588,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data
|
||||
@ -669,7 +669,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data
|
||||
@ -709,7 +709,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data
|
||||
@ -762,7 +762,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data
|
||||
@ -839,7 +839,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
|
||||
|
||||
// Create the prover confirm operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverConfirm, err := global.NewProverConfirm(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverConfirm, err := global.NewProverConfirm([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data
|
||||
|
||||
@ -20,8 +20,8 @@ import (
|
||||
)
|
||||
|
||||
type ProverReject struct {
|
||||
// The filter representing the reject request
|
||||
Filter []byte
|
||||
// The filters representing the reject request
|
||||
Filters [][]byte
|
||||
// The frame number when this request is made
|
||||
FrameNumber uint64
|
||||
// The BLS48581 addressed signature
|
||||
@ -34,14 +34,14 @@ type ProverReject struct {
|
||||
}
|
||||
|
||||
func NewProverReject(
|
||||
filter []byte,
|
||||
filters [][]byte,
|
||||
frameNumber uint64,
|
||||
keyManager keys.KeyManager,
|
||||
hypergraph hypergraph.Hypergraph,
|
||||
rdfMultiprover *schema.RDFMultiprover,
|
||||
) (*ProverReject, error) {
|
||||
return &ProverReject{
|
||||
Filter: filter,
|
||||
Filters: filters,
|
||||
FrameNumber: frameNumber,
|
||||
keyManager: keyManager,
|
||||
hypergraph: hypergraph,
|
||||
@ -100,144 +100,146 @@ func (p *ProverReject) Materialize(
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, p.Filter),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
// Get allocation vertex
|
||||
allocationVertex, err := hg.Get(
|
||||
allocationFullAddress[:32],
|
||||
allocationFullAddress[32:],
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
if err != nil || allocationVertex == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"materialize",
|
||||
for _, filter := range p.Filters {
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, filter),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
var allocationTree *tries.VectorCommitmentTree
|
||||
allocationTree, ok = allocationVertex.(*tries.VectorCommitmentTree)
|
||||
if !ok || allocationTree == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("invalid object returned for vertex"),
|
||||
"materialize",
|
||||
// Get allocation vertex
|
||||
allocationVertex, err := hg.Get(
|
||||
allocationFullAddress[:32],
|
||||
allocationFullAddress[32:],
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
}
|
||||
if err != nil || allocationVertex == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"materialize",
|
||||
)
|
||||
}
|
||||
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
var allocationTree *tries.VectorCommitmentTree
|
||||
allocationTree, ok = allocationVertex.(*tries.VectorCommitmentTree)
|
||||
if !ok || allocationTree == nil {
|
||||
return nil, errors.Wrap(
|
||||
errors.New("invalid object returned for vertex"),
|
||||
"materialize",
|
||||
)
|
||||
}
|
||||
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Determine what we're rejecting based on current status
|
||||
if status == 0 {
|
||||
// Rejecting join - update allocation status to left (4)
|
||||
err = p.rdfMultiprover.Set(
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{4},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store join rejection frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Determine what we're rejecting based on current status
|
||||
if status == 0 {
|
||||
// Rejecting join - update allocation status to left (4)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{4},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store join rejection frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"JoinRejectFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Rejecting leave - update allocation status back to active (1)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{1},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store leave rejection frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveRejectFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
}
|
||||
|
||||
// Get a copy of the original allocation tree for change tracking
|
||||
var prior *tries.VectorCommitmentTree
|
||||
originalAllocationVertex, err := hg.Get(
|
||||
allocationFullAddress[:32],
|
||||
allocationFullAddress[32:],
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
if err == nil && originalAllocationVertex != nil {
|
||||
prior = originalAllocationVertex.(*tries.VectorCommitmentTree)
|
||||
}
|
||||
|
||||
// Update allocation vertex
|
||||
updatedAllocation := hg.NewVertexAddMaterializedState(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
|
||||
[32]byte(allocationFullAddress[32:]),
|
||||
frameNumber,
|
||||
prior,
|
||||
allocationTree,
|
||||
)
|
||||
|
||||
err = hg.Set(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"JoinRejectFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
allocationAddress,
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
frameNumber,
|
||||
updatedAllocation,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Rejecting leave - update allocation status back to active (1)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
[]byte{1},
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Store leave rejection frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(frameNumberBytes, p.FrameNumber)
|
||||
err = p.rdfMultiprover.Set(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveRejectFrameNumber",
|
||||
frameNumberBytes,
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
}
|
||||
|
||||
// Get a copy of the original allocation tree for change tracking
|
||||
var prior *tries.VectorCommitmentTree
|
||||
originalAllocationVertex, err := hg.Get(
|
||||
allocationFullAddress[:32],
|
||||
allocationFullAddress[32:],
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
)
|
||||
if err == nil && originalAllocationVertex != nil {
|
||||
prior = originalAllocationVertex.(*tries.VectorCommitmentTree)
|
||||
}
|
||||
|
||||
// Update allocation vertex
|
||||
updatedAllocation := hg.NewVertexAddMaterializedState(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS,
|
||||
[32]byte(allocationFullAddress[32:]),
|
||||
frameNumber,
|
||||
prior,
|
||||
allocationTree,
|
||||
)
|
||||
|
||||
err = hg.Set(
|
||||
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
|
||||
allocationAddress,
|
||||
hgstate.VertexAddsDiscriminator,
|
||||
frameNumber,
|
||||
updatedAllocation,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "materialize")
|
||||
}
|
||||
|
||||
// Update the prover status to reflect the aggregate allocation status
|
||||
@ -277,7 +279,7 @@ func (p *ProverReject) Prove(frameNumber uint64) error {
|
||||
rejectMessage := bytes.Buffer{}
|
||||
|
||||
// Add filter
|
||||
rejectMessage.Write(p.Filter)
|
||||
rejectMessage.Write(slices.Concat(p.Filters...))
|
||||
|
||||
// Add frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
@ -343,26 +345,28 @@ func (p *ProverReject) GetWriteAddresses(frameNumber uint64) ([][]byte, error) {
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, p.Filter),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
result := [][]byte{}
|
||||
addresses := map[string]struct{}{}
|
||||
addresses[string(proverFullAddress[:])] = struct{}{}
|
||||
addresses[string(allocationFullAddress[:])] = struct{}{}
|
||||
for _, filter := range p.Filters {
|
||||
// Calculate allocation address:
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), publicKey, filter),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get write addresses")
|
||||
}
|
||||
|
||||
result := [][]byte{}
|
||||
for key := range addresses {
|
||||
result = append(result, []byte(key))
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
addresses[string(allocationFullAddress[:])] = struct{}{}
|
||||
|
||||
for key := range addresses {
|
||||
result = append(result, []byte(key))
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
@ -374,7 +378,7 @@ func (p *ProverReject) Verify(frameNumber uint64) (bool, error) {
|
||||
rejectMessage := bytes.Buffer{}
|
||||
|
||||
// Add filter
|
||||
rejectMessage.Write(p.Filter)
|
||||
rejectMessage.Write(slices.Concat(p.Filters...))
|
||||
|
||||
// Add frame number
|
||||
frameNumberBytes := make([]byte, 8)
|
||||
@ -417,102 +421,104 @@ func (p *ProverReject) Verify(frameNumber uint64) (bool, error) {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
|
||||
// Calculate allocation address to verify it exists
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), pubkey, p.Filter),
|
||||
)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
// Get allocation vertex
|
||||
allocationTree, err := p.hypergraph.GetVertexData(allocationFullAddress)
|
||||
if err != nil || allocationTree == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"verify",
|
||||
for _, filter := range p.Filters {
|
||||
// Calculate allocation address to verify it exists
|
||||
allocationAddressBI, err := poseidon.HashBytes(
|
||||
slices.Concat([]byte("PROVER_ALLOCATION"), pubkey, filter),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
allocationAddress := allocationAddressBI.FillBytes(make([]byte, 32))
|
||||
allocationFullAddress := [64]byte{}
|
||||
copy(allocationFullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
|
||||
copy(allocationFullAddress[32:], allocationAddress)
|
||||
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"Status",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
// Get allocation vertex
|
||||
allocationTree, err := p.hypergraph.GetVertexData(allocationFullAddress)
|
||||
if err != nil || allocationTree == nil {
|
||||
return false, errors.Wrap(
|
||||
errors.New("allocation not found"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Can only reject if allocation is in joining (0) or leaving (3) state
|
||||
if status != 0 && status != 3 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("invalid allocation state for rejection"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
if status == 0 {
|
||||
// Rejecting join
|
||||
// Get join frame number
|
||||
joinFrameBytes, err := p.rdfMultiprover.Get(
|
||||
// Check current allocation status
|
||||
statusBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"JoinFrameNumber",
|
||||
"Status",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(joinFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing join frame"), "verify")
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify")
|
||||
}
|
||||
joinFrame := binary.BigEndian.Uint64(joinFrameBytes)
|
||||
|
||||
// Special case: if join was before frame 255840, can reject any time
|
||||
if joinFrame >= token.FRAME_2_1_EXTENDED_ENROLL_END {
|
||||
// Otherwise same timing constraints as confirm
|
||||
framesSinceJoin := frameNumber - joinFrame
|
||||
if framesSinceJoin > 720 {
|
||||
status := uint8(0)
|
||||
if len(statusBytes) > 0 {
|
||||
status = statusBytes[0]
|
||||
}
|
||||
|
||||
// Can only reject if allocation is in joining (0) or leaving (3) state
|
||||
if status != 0 && status != 3 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("invalid allocation state for rejection"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
|
||||
if status == 0 {
|
||||
// Rejecting join
|
||||
// Get join frame number
|
||||
joinFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"JoinFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(joinFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing join frame"), "verify")
|
||||
}
|
||||
joinFrame := binary.BigEndian.Uint64(joinFrameBytes)
|
||||
|
||||
// Special case: if join was before frame 255840, can reject any time
|
||||
if joinFrame >= token.FRAME_2_1_EXTENDED_ENROLL_END {
|
||||
// Otherwise same timing constraints as confirm
|
||||
framesSinceJoin := frameNumber - joinFrame
|
||||
if framesSinceJoin > 720 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("join already implicitly rejected after 720 frames"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Rejecting leave
|
||||
// Get leave frame number
|
||||
leaveFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(leaveFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing leave frame"), "verify")
|
||||
}
|
||||
leaveFrame := binary.BigEndian.Uint64(leaveFrameBytes)
|
||||
|
||||
framesSinceLeave := frameNumber - leaveFrame
|
||||
if framesSinceLeave < 360 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("join already implicitly rejected after 720 frames"),
|
||||
errors.New("must wait 360 frames after leave to reject"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if framesSinceLeave > 720 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("leave already implicitly confirmed after 720 frames"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if status == 3 {
|
||||
// Rejecting leave
|
||||
// Get leave frame number
|
||||
leaveFrameBytes, err := p.rdfMultiprover.Get(
|
||||
GLOBAL_RDF_SCHEMA,
|
||||
"allocation:ProverAllocation",
|
||||
"LeaveFrameNumber",
|
||||
allocationTree,
|
||||
)
|
||||
if err != nil || len(leaveFrameBytes) != 8 {
|
||||
return false, errors.Wrap(errors.New("missing leave frame"), "verify")
|
||||
}
|
||||
leaveFrame := binary.BigEndian.Uint64(leaveFrameBytes)
|
||||
|
||||
framesSinceLeave := frameNumber - leaveFrame
|
||||
if framesSinceLeave < 360 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("must wait 360 frames after leave to reject"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
if framesSinceLeave > 720 {
|
||||
return false, errors.Wrap(
|
||||
errors.New("leave already implicitly confirmed after 720 frames"),
|
||||
"verify",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -58,7 +58,7 @@ func TestProverReject_Prove(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Call the prove function
|
||||
@ -109,7 +109,7 @@ func TestProverReject_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -181,7 +181,7 @@ func TestProverReject_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -253,7 +253,7 @@ func TestProverReject_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -297,7 +297,7 @@ func TestProverReject_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, frameNumber, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, frameNumber, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -367,7 +367,7 @@ func TestProverReject_Verify(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, rejectFrame, mockKeyManager, mockHypergraph, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
@ -398,7 +398,7 @@ func TestProverReject_GetCost(t *testing.T) {
|
||||
|
||||
// Create the prover reject operation
|
||||
rdfMultiprover := createMockRDFMultiprover()
|
||||
proverReject, err := global.NewProverReject(filter, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
proverReject, err := global.NewProverReject([][]byte{filter}, frameNumber, mockKeyManager, nil, rdfMultiprover)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the signature data manually
|
||||
|
||||
@ -275,7 +275,7 @@ func FuzzProverConfirm(f *testing.F) {
|
||||
}
|
||||
|
||||
pc := &global.ProverConfirm{
|
||||
Filter: filter,
|
||||
Filters: [][]byte{filter},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBLS48581: global.BLS48581AddressedSignature{
|
||||
Address: make([]byte, 32),
|
||||
@ -296,7 +296,7 @@ func FuzzProverConfirm(f *testing.F) {
|
||||
t.Fatalf("FromBytes failed: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(pc.Filter, decoded.Filter) {
|
||||
if !bytes.Equal(pc.Filters[0], decoded.Filters[0]) {
|
||||
t.Errorf("Filter mismatch")
|
||||
}
|
||||
if pc.FrameNumber != decoded.FrameNumber {
|
||||
@ -319,7 +319,7 @@ func FuzzProverReject(f *testing.F) {
|
||||
}
|
||||
|
||||
pr := &global.ProverReject{
|
||||
Filter: filter,
|
||||
Filters: [][]byte{filter},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBLS48581: global.BLS48581AddressedSignature{
|
||||
Address: make([]byte, 32),
|
||||
@ -340,7 +340,7 @@ func FuzzProverReject(f *testing.F) {
|
||||
t.Fatalf("FromBytes failed: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(pr.Filter, decoded.Filter) {
|
||||
if !bytes.Equal(pr.Filters[0], decoded.Filters[0]) {
|
||||
t.Errorf("Filter mismatch")
|
||||
}
|
||||
if pr.FrameNumber != decoded.FrameNumber {
|
||||
|
||||
@ -188,7 +188,7 @@ func TestProverResumeSerialization(t *testing.T) {
|
||||
// TestProverConfirmSerialization tests serialization and deserialization of ProverConfirm
|
||||
func TestProverConfirmSerialization(t *testing.T) {
|
||||
proverConfirm := global.ProverConfirm{
|
||||
Filter: []byte("filter-data"),
|
||||
Filters: [][]byte{[]byte("filter-data")},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBLS48581: MockAddressedSignature(),
|
||||
}
|
||||
@ -208,7 +208,7 @@ func TestProverConfirmSerialization(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify
|
||||
assert.Equal(t, proverConfirm.Filter, deserializedProverConfirm.Filter)
|
||||
assert.Equal(t, proverConfirm.Filters, deserializedProverConfirm.Filters)
|
||||
assert.Equal(t, proverConfirm.FrameNumber, deserializedProverConfirm.FrameNumber)
|
||||
assert.Equal(t, proverConfirm.PublicKeySignatureBLS48581.Address, deserializedProverConfirm.PublicKeySignatureBLS48581.Address)
|
||||
assert.Equal(t, proverConfirm.PublicKeySignatureBLS48581.Signature, deserializedProverConfirm.PublicKeySignatureBLS48581.Signature)
|
||||
@ -217,7 +217,7 @@ func TestProverConfirmSerialization(t *testing.T) {
|
||||
// TestProverRejectSerialization tests serialization and deserialization of ProverReject
|
||||
func TestProverRejectSerialization(t *testing.T) {
|
||||
proverReject := global.ProverReject{
|
||||
Filter: []byte("filter-data"),
|
||||
Filters: [][]byte{[]byte("filter-data")},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBLS48581: MockAddressedSignature(),
|
||||
}
|
||||
@ -237,7 +237,7 @@ func TestProverRejectSerialization(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify
|
||||
assert.Equal(t, proverReject.Filter, deserializedProverReject.Filter)
|
||||
assert.Equal(t, proverReject.Filters[0], deserializedProverReject.Filters[0])
|
||||
assert.Equal(t, proverReject.FrameNumber, deserializedProverReject.FrameNumber)
|
||||
assert.Equal(t, proverReject.PublicKeySignatureBLS48581.Address, deserializedProverReject.PublicKeySignatureBLS48581.Address)
|
||||
assert.Equal(t, proverReject.PublicKeySignatureBLS48581.Signature, deserializedProverReject.PublicKeySignatureBLS48581.Signature)
|
||||
@ -306,7 +306,7 @@ func TestProverKickSerialization(t *testing.T) {
|
||||
// Verify TraversalProof structure
|
||||
require.NotNil(t, deserializedProverKick.TraversalProof)
|
||||
assert.Equal(t, len(proverKick.TraversalProof.SubProofs), len(deserializedProverKick.TraversalProof.SubProofs))
|
||||
|
||||
|
||||
// Multiproof will be nil when deserialized without inclusionProver
|
||||
assert.Nil(t, deserializedProverKick.TraversalProof.Multiproof)
|
||||
|
||||
@ -316,19 +316,19 @@ func TestProverKickSerialization(t *testing.T) {
|
||||
assert.Equal(t, subProof.Ys, deserializedProverKick.TraversalProof.SubProofs[i].Ys)
|
||||
assert.Equal(t, subProof.Paths, deserializedProverKick.TraversalProof.SubProofs[i].Paths)
|
||||
}
|
||||
|
||||
|
||||
// Test deserialization with inclusionProver to reconstruct Multiproof
|
||||
newMockMultiproof := &mocks.MockMultiproof{}
|
||||
// The multiproof bytes are stored as multicommitment + proof concatenated
|
||||
multiproofBytes := append([]byte("mock-multicommitment"), []byte("mock-proof")...)
|
||||
newMockMultiproof.On("FromBytes", multiproofBytes).Return(nil)
|
||||
mockInclusionProver.On("NewMultiproof").Return(newMockMultiproof)
|
||||
|
||||
|
||||
// Deserialize with hypergraph and inclusionProver
|
||||
var deserializedWithDeps global.ProverKick
|
||||
err = deserializedWithDeps.FromBytesWithHypergraph(data, nil, mockInclusionProver, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
// Now Multiproof should be reconstructed
|
||||
require.NotNil(t, deserializedWithDeps.TraversalProof)
|
||||
assert.NotNil(t, deserializedWithDeps.TraversalProof.Multiproof)
|
||||
@ -409,11 +409,11 @@ func TestInvalidLengthErrors(t *testing.T) {
|
||||
var deserializedSig global.BLS48581SignatureWithProofOfPossession
|
||||
err := deserializedSig.FromBytes(invalidData)
|
||||
assert.Error(t, err, "Should error on invalid data")
|
||||
|
||||
|
||||
// Test with empty data
|
||||
err = deserializedSig.FromBytes([]byte{})
|
||||
assert.Error(t, err, "Should error on empty data")
|
||||
|
||||
|
||||
// Test with truncated data (type prefix only)
|
||||
err = deserializedSig.FromBytes([]byte{0x00, 0x00, 0x03, 0x08})
|
||||
assert.Error(t, err, "Should error on truncated data")
|
||||
@ -425,11 +425,11 @@ func TestInvalidLengthErrors(t *testing.T) {
|
||||
var deserializedSig global.BLS48581AddressedSignature
|
||||
err := deserializedSig.FromBytes(invalidData)
|
||||
assert.Error(t, err, "Should error on invalid data")
|
||||
|
||||
|
||||
// Test with empty data
|
||||
err = deserializedSig.FromBytes([]byte{})
|
||||
assert.Error(t, err, "Should error on empty data")
|
||||
|
||||
|
||||
// Test with truncated data (type prefix only)
|
||||
err = deserializedSig.FromBytes([]byte{0x00, 0x00, 0x03, 0x09})
|
||||
assert.Error(t, err, "Should error on truncated data")
|
||||
@ -517,7 +517,7 @@ func TestSerializationRoundTrip(t *testing.T) {
|
||||
name: "ProverConfirm",
|
||||
getObj: func() interface{} {
|
||||
return &global.ProverConfirm{
|
||||
Filter: []byte("filter-data"),
|
||||
Filters: [][]byte{[]byte("filter-data")},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBLS48581: MockAddressedSignature(),
|
||||
}
|
||||
@ -534,7 +534,7 @@ func TestSerializationRoundTrip(t *testing.T) {
|
||||
name: "ProverReject",
|
||||
getObj: func() interface{} {
|
||||
return &global.ProverReject{
|
||||
Filter: []byte("filter-data"),
|
||||
Filters: [][]byte{[]byte("filter-data")},
|
||||
FrameNumber: 12345,
|
||||
PublicKeySignatureBLS48581: MockAddressedSignature(),
|
||||
}
|
||||
|
||||
@ -1,6 +1,8 @@
|
||||
package global
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"source.quilibrium.com/quilibrium/monorepo/protobufs"
|
||||
"source.quilibrium.com/quilibrium/monorepo/types/crypto"
|
||||
@ -259,10 +261,24 @@ func (p *ProverConfirm) FromBytes(data []byte) error {
|
||||
return errors.Wrap(err, "from bytes")
|
||||
}
|
||||
|
||||
filters := [][]byte{}
|
||||
if len(pb.Filters) > 0 {
|
||||
filters = pb.Filters
|
||||
} else {
|
||||
if bytes.Equal(pb.Filter, bytes.Repeat([]byte("reserved"), 4)) {
|
||||
return errors.Wrap(
|
||||
errors.New("filter cannot be reserved"),
|
||||
"from bytes",
|
||||
)
|
||||
}
|
||||
filters = append(filters, pb.Filter)
|
||||
}
|
||||
|
||||
// Copy only the data fields, runtime dependencies will be set separately
|
||||
p.Filter = converted.Filter
|
||||
p.Filters = filters
|
||||
p.FrameNumber = converted.FrameNumber
|
||||
p.PublicKeySignatureBLS48581 = converted.PublicKeySignatureBLS48581
|
||||
p.Filters = converted.Filters
|
||||
|
||||
return nil
|
||||
}
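
The decode path above accepts both the new repeated Filters field and the legacy single Filter, refusing the reserved filter value before promoting it into a one-element slice. A standalone sketch of that fallback follows, assuming the decoded protobuf exposes Filter and Filters as shown; the helper name and package are invented for illustration.

```go
package sketch

import (
	"bytes"
	"errors"
)

// filtersFromMessage sketches the legacy-filter fallback in FromBytes above:
// prefer the repeated Filters field, otherwise promote the single legacy
// Filter after rejecting the reserved value.
func filtersFromMessage(legacyFilter []byte, filters [][]byte) ([][]byte, error) {
	if len(filters) > 0 {
		return filters, nil
	}
	if bytes.Equal(legacyFilter, bytes.Repeat([]byte("reserved"), 4)) {
		return nil, errors.New("filter cannot be reserved")
	}
	return [][]byte{legacyFilter}, nil
}
```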
@ -297,8 +313,21 @@ func (p *ProverReject) FromBytes(data []byte) error {
|
||||
return errors.Wrap(err, "from bytes")
|
||||
}
|
||||
|
||||
filters := [][]byte{}
|
||||
if len(pb.Filters) > 0 {
|
||||
filters = pb.Filters
|
||||
} else {
|
||||
if bytes.Equal(pb.Filter, bytes.Repeat([]byte("reserved"), 4)) {
|
||||
return errors.Wrap(
|
||||
errors.New("filter cannot be reserved"),
|
||||
"from bytes",
|
||||
)
|
||||
}
|
||||
filters = append(filters, pb.Filter)
|
||||
}
|
||||
|
||||
// Copy only the data fields, runtime dependencies will be set separately
|
||||
p.Filter = converted.Filter
|
||||
p.Filters = filters
|
||||
p.FrameNumber = converted.FrameNumber
|
||||
p.PublicKeySignatureBLS48581 = converted.PublicKeySignatureBLS48581
|
||||
|
||||
|
||||
59
node/main.go
@ -126,6 +126,11 @@ var (
|
||||
false,
|
||||
"starts the db console mode (does not run nodes)",
|
||||
)
|
||||
dangerClearPending = flag.Bool(
|
||||
"danger-clear-pending",
|
||||
false,
|
||||
"clears pending states (dangerous action)",
|
||||
)
|
||||
|
||||
// *char flags
|
||||
blockchar = "█"
|
||||
@ -376,6 +381,60 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
if *dangerClearPending {
|
||||
db := store.NewPebbleDB(logger, nodeConfig.DB, 0)
|
||||
defer db.Close()
|
||||
consensusStore := store.NewPebbleConsensusStore(db, logger)
|
||||
state, err := consensusStore.GetConsensusState(nil)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
clockStore := store.NewPebbleClockStore(db, logger)
|
||||
qc, err := clockStore.GetQuorumCertificate(nil, state.FinalizedRank)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = clockStore.DeleteGlobalClockFrameRange(
|
||||
qc.FrameNumber+1,
|
||||
qc.FrameNumber+10000,
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = clockStore.DeleteQuorumCertificateRange(
|
||||
nil,
|
||||
qc.Rank+1,
|
||||
qc.Rank+10000,
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if state.LatestTimeout != nil {
|
||||
latestTCRank := state.LatestTimeout.Rank
|
||||
err = clockStore.DeleteTimeoutCertificateRange(
|
||||
nil,
|
||||
latestTCRank+1,
|
||||
latestTCRank+10000,
|
||||
latestTCRank,
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("pending entries cleared")
|
||||
return
|
||||
}
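
Operationally, this block is reached by starting the node binary with the new flag, for example `node -danger-clear-pending` (the binary name is an assumption). It deletes up to 10,000 pending clock frames, quorum certificates, and timeout certificates above the last finalized checkpoint and then exits without starting consensus, so it is only meant for recovering from a corrupted pending state.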
|
||||
if *core == 0 {
|
||||
config.PrintLogo(*char)
|
||||
config.PrintVersion(uint8(*network), *char, *ver)
|
||||
|
||||
@ -9,7 +9,9 @@ import (
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -22,6 +24,7 @@ import (
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/test/bufconn"
|
||||
"source.quilibrium.com/quilibrium/monorepo/bls48581"
|
||||
"source.quilibrium.com/quilibrium/monorepo/config"
|
||||
hgcrdt "source.quilibrium.com/quilibrium/monorepo/hypergraph"
|
||||
@ -772,6 +775,314 @@ func TestHypergraphPartialSync(t *testing.T) {
|
||||
assert.Greater(t, clientHas, 1, "mismatching vertex data entries")
|
||||
}
|
||||
|
||||
func TestHypergraphSyncWithConcurrentCommits(t *testing.T) {
|
||||
logger, _ := zap.NewDevelopment()
|
||||
enc := verenc.NewMPCitHVerifiableEncryptor(1)
|
||||
inclusionProver := bls48581.NewKZGInclusionProver(logger)
|
||||
|
||||
dataTree := buildDataTree(t, enc, inclusionProver)
|
||||
|
||||
serverPath := filepath.Join(t.TempDir(), "server")
|
||||
clientBase := filepath.Join(t.TempDir(), "clients")
|
||||
|
||||
serverDB := store.NewPebbleDB(logger, &config.DBConfig{Path: serverPath}, 0)
|
||||
defer serverDB.Close()
|
||||
|
||||
serverStore := store.NewPebbleHypergraphStore(
|
||||
&config.DBConfig{Path: serverPath},
|
||||
serverDB,
|
||||
logger,
|
||||
enc,
|
||||
inclusionProver,
|
||||
)
|
||||
|
||||
const clientCount = 100
|
||||
clientDBs := make([]*store.PebbleDB, clientCount)
|
||||
clientStores := make([]*store.PebbleHypergraphStore, clientCount)
|
||||
clientHGs := make([]*hgcrdt.HypergraphCRDT, clientCount)
|
||||
|
||||
serverHG := hgcrdt.NewHypergraph(
|
||||
logger.With(zap.String("side", "server")),
|
||||
serverStore,
|
||||
inclusionProver,
|
||||
[]int{},
|
||||
&tests.Nopthenticator{},
|
||||
)
|
||||
for i := 0; i < clientCount; i++ {
|
||||
clientPath := filepath.Join(clientBase, fmt.Sprintf("client-%d", i))
|
||||
clientDBs[i] = store.NewPebbleDB(logger, &config.DBConfig{Path: clientPath}, 0)
|
||||
clientStores[i] = store.NewPebbleHypergraphStore(
|
||||
&config.DBConfig{Path: clientPath},
|
||||
clientDBs[i],
|
||||
logger,
|
||||
enc,
|
||||
inclusionProver,
|
||||
)
|
||||
clientHGs[i] = hgcrdt.NewHypergraph(
|
||||
logger.With(zap.String("side", fmt.Sprintf("client-%d", i))),
|
||||
clientStores[i],
|
||||
inclusionProver,
|
||||
[]int{},
|
||||
&tests.Nopthenticator{},
|
||||
)
|
||||
}
|
||||
defer func() {
|
||||
for _, db := range clientDBs {
|
||||
if db != nil {
|
||||
db.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Seed both hypergraphs with a baseline vertex so they share the shard key.
|
||||
    domain := randomBytes32(t)
    initialVertex := hgcrdt.NewVertex(
        domain,
        randomBytes32(t),
        dataTree.Commit(inclusionProver, false),
        dataTree.GetSize(),
    )
    addVertices(
        t,
        serverStore,
        serverHG,
        dataTree,
        initialVertex,
    )
    for i := 0; i < clientCount; i++ {
        addVertices(
            t,
            clientStores[i],
            clientHGs[i],
            dataTree,
            initialVertex,
        )
    }

    shardKey := application.GetShardKey(initialVertex)

    // Start gRPC server backed by the server hypergraph.
    const bufSize = 1 << 20
    lis := bufconn.Listen(bufSize)

    grpcServer := grpc.NewServer(
        grpc.ChainStreamInterceptor(func(
            srv interface{},
            ss grpc.ServerStream,
            info *grpc.StreamServerInfo,
            handler grpc.StreamHandler,
        ) error {
            _, priv, _ := ed448.GenerateKey(rand.Reader)
            privKey, err := pcrypto.UnmarshalEd448PrivateKey(priv)
            require.NoError(t, err)

            pub := privKey.GetPublic()
            peerID, err := peer.IDFromPublicKey(pub)
            require.NoError(t, err)

            return handler(srv, &serverStream{
                ServerStream: ss,
                ctx:          internal_grpc.NewContextWithPeerID(ss.Context(), peerID),
            })
        }),
    )
    protobufs.RegisterHypergraphComparisonServiceServer(
        grpcServer,
        serverHG,
    )
    defer grpcServer.Stop()

    go func() {
        _ = grpcServer.Serve(lis)
    }()

    dialClient := func() (*grpc.ClientConn, protobufs.HypergraphComparisonServiceClient) {
        dialer := func(context.Context, string) (net.Conn, error) {
            return lis.Dial()
        }

        conn, err := grpc.DialContext(
            context.Background(),
            "bufnet",
            grpc.WithContextDialer(dialer),
            grpc.WithTransportCredentials(insecure.NewCredentials()),
        )
        require.NoError(t, err)
        return conn, protobufs.NewHypergraphComparisonServiceClient(conn)
    }

    const rounds = 3
    for round := 0; round < rounds; round++ {
        c, _ := serverHG.Commit(uint64(round))
        fmt.Printf("svr commitment: %x\n", c[shardKey][0])

        updates := generateVertices(
            t,
            domain,
            dataTree,
            inclusionProver,
            5,
        )

        var syncWG sync.WaitGroup
        var serverWG sync.WaitGroup

        syncWG.Add(clientCount)
        serverWG.Add(1)

        for clientIdx := 0; clientIdx < clientCount; clientIdx++ {
            go func(idx int) {
                defer syncWG.Done()
                clientHG := clientHGs[idx]
                conn, client := dialClient()
                streamCtx, cancelStream := context.WithTimeout(
                    context.Background(),
                    100*time.Second,
                )
                stream, err := client.HyperStream(streamCtx)
                require.NoError(t, err)
                clientHG.Sync(
                    stream,
                    shardKey,
                    protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
                )
                require.NoError(t, stream.CloseSend())
                cancelStream()
                conn.Close()

                c, _ := clientHGs[idx].Commit(uint64(round))
                fmt.Printf("cli commitment: %x\n", c[shardKey][0])
            }(clientIdx)
        }

        go func(round int) {
            defer serverWG.Done()
            logger.Info("server applying concurrent updates", zap.Int("round", round))
            addVertices(t, serverStore, serverHG, dataTree, updates...)
            logger.Info("server commit starting", zap.Int("round", round))
            _, err := serverHG.Commit(uint64(round + 1))
            require.NoError(t, err)
            logger.Info("server commit finished", zap.Int("round", round))
        }(round)

        syncWG.Wait()
        serverWG.Wait()
    }

    // Add additional server-only updates after the concurrent sync rounds.
    extraUpdates := generateVertices(t, domain, dataTree, inclusionProver, 3)
    addVertices(t, serverStore, serverHG, dataTree, extraUpdates...)

    _, err := serverHG.Commit(100)
    require.NoError(t, err)

    _, err = serverHG.Commit(101)
    require.NoError(t, err)
    wg := sync.WaitGroup{}
    wg.Add(len(clientHGs))
    serverRoot := serverHG.GetVertexAddsSet(shardKey).GetTree().Commit(false)
    for i := 0; i < len(clientHGs); i++ {
        go func(idx int) {
            defer wg.Done()

            _, err := clientHGs[idx].Commit(100)
            require.NoError(t, err)
            // Final sync to catch up.
            conn, client := dialClient()
            streamCtx, cancelStream := context.WithTimeout(
                context.Background(),
                100*time.Second,
            )
            stream, err := client.HyperStream(streamCtx)
            require.NoError(t, err)
            err = clientHGs[idx].Sync(
                stream,
                shardKey,
                protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
            )
            require.NoError(t, err)
            require.NoError(t, stream.CloseSend())
            cancelStream()
            conn.Close()

            _, err = clientHGs[idx].Commit(101)
            require.NoError(t, err)
            clientRoot := clientHGs[idx].GetVertexAddsSet(shardKey).GetTree().Commit(false)
            assert.Equal(t, serverRoot, clientRoot, "client should converge to server state")
        }(i)
    }
    wg.Wait()
}

func buildDataTree(
    t *testing.T,
    enc *verenc.MPCitHVerifiableEncryptor,
    prover *bls48581.KZGInclusionProver,
) *crypto.VectorCommitmentTree {
    t.Helper()

    pub, _, _ := ed448.GenerateKey(rand.Reader)
    data := enc.Encrypt(make([]byte, 20), pub)
    verenc1 := data[0].Compress()
    tree := &crypto.VectorCommitmentTree{}
    for _, encrypted := range []application.Encrypted{verenc1} {
        bytes := encrypted.ToBytes()
        id := sha512.Sum512(bytes)
        tree.Insert(id[:], bytes, encrypted.GetStatement(), big.NewInt(int64(len(bytes))))
    }
    tree.Commit(prover, false)
    return tree
}

func addVertices(
    t *testing.T,
    hStore *store.PebbleHypergraphStore,
    hg *hgcrdt.HypergraphCRDT,
    dataTree *crypto.VectorCommitmentTree,
    vertices ...application.Vertex,
) {
    t.Helper()

    txn, err := hStore.NewTransaction(false)
    require.NoError(t, err)
    for _, v := range vertices {
        id := v.GetID()
        require.NoError(t, hStore.SaveVertexTree(txn, id[:], dataTree))
        require.NoError(t, hg.AddVertex(txn, v))
    }
    require.NoError(t, txn.Commit())
}

func generateVertices(
    t *testing.T,
    appAddress [32]byte,
    dataTree *crypto.VectorCommitmentTree,
    prover *bls48581.KZGInclusionProver,
    count int,
) []application.Vertex {
    t.Helper()

    verts := make([]application.Vertex, count)
    for i := 0; i < count; i++ {
        addr := randomBytes32(t)
        verts[i] = hgcrdt.NewVertex(
            appAddress,
            addr,
            dataTree.Commit(prover, false),
            dataTree.GetSize(),
        )
    }
    return verts
}

func randomBytes32(t *testing.T) [32]byte {
    t.Helper()

    var out [32]byte
    _, err := rand.Read(out[:])
    require.NoError(t, err)
    return out
}

func toUint32Slice(s []int32) []uint32 {
    o := []uint32{}
    for _, p := range s {

@ -1394,6 +1394,49 @@ func (p *PebbleClockStore) DeleteGlobalClockFrameRange(
    return errors.Wrap(err, "delete global clock frame range")
}

func (p *PebbleClockStore) DeleteQuorumCertificateRange(
    filter []byte,
    minRank uint64,
    maxRank uint64,
) error {
    err := p.db.DeleteRange(
        clockQuorumCertificateKey(minRank, filter),
        clockQuorumCertificateKey(maxRank, filter),
    )
    if err != nil {
        return errors.Wrap(err, "delete quorum certificate range")
    }

    err = p.db.Set(
        clockQuorumCertificateLatestIndex(nil),
        binary.BigEndian.AppendUint64(nil, minRank-1),
    )

    return errors.Wrap(err, "delete quorum certificate range")
}

func (p *PebbleClockStore) DeleteTimeoutCertificateRange(
    filter []byte,
    minRank uint64,
    maxRank uint64,
    priorLatestRank uint64,
) error {
    err := p.db.DeleteRange(
        clockTimeoutCertificateKey(minRank, filter),
        clockTimeoutCertificateKey(maxRank, filter),
    )
    if err != nil {
        return errors.Wrap(err, "delete timeout certificate range")
    }

    err = p.db.Set(
        clockTimeoutCertificateLatestIndex(nil),
        binary.BigEndian.AppendUint64(nil, priorLatestRank),
    )

    return errors.Wrap(err, "delete timeout certificate range")
}
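
Editor's note: a minimal caller-side sketch of how the two new range deletions might be combined to roll consensus state back past a given rank. The helper name, the rank bounds, and the clockStore variable are assumptions for illustration, not part of this commit.

    // Hypothetical rollback: drop quorum and timeout certificates in
    // [from, to) and restore the latest-index markers.
    func rollbackCertificates(
        clockStore *PebbleClockStore,
        filter []byte,
        from uint64,
        to uint64,
        priorLatestRank uint64,
    ) error {
        if err := clockStore.DeleteQuorumCertificateRange(filter, from, to); err != nil {
            return err
        }
        return clockStore.DeleteTimeoutCertificateRange(filter, from, to, priorLatestRank)
    }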

func (p *PebbleClockStore) DeleteShardClockFrameRange(
    filter []byte,
    fromFrameNumber uint64,

@ -30,6 +30,7 @@ type PebbleHypergraphStore struct {
    logger *zap.Logger
    verenc crypto.VerifiableEncryptor
    prover crypto.InclusionProver
    pebble *pebble.DB
}

func NewPebbleHypergraphStore(
@ -39,15 +40,50 @@ func NewPebbleHypergraphStore(
    verenc crypto.VerifiableEncryptor,
    prover crypto.InclusionProver,
) *PebbleHypergraphStore {
    var pebbleHandle *pebble.DB
    if pdb, ok := db.(*PebbleDB); ok {
        pebbleHandle = pdb.DB()
    }

    return &PebbleHypergraphStore{
        config,
        db,
        logger,
        verenc,
        prover,
        pebbleHandle,
    }
}

func (p *PebbleHypergraphStore) NewSnapshot() (
    tries.TreeBackingStore,
    func(),
    error,
) {
    if p.pebble == nil {
        return nil, nil, errors.New("hypergraph store does not support snapshots")
    }

    snapshot := p.pebble.NewSnapshot()
    snapshotDB := &pebbleSnapshotDB{snap: snapshot}
    snapshotStore := NewPebbleHypergraphStore(
        p.config,
        snapshotDB,
        p.logger,
        p.verenc,
        p.prover,
    )
    snapshotStore.pebble = nil

    release := func() {
        if err := snapshotDB.Close(); err != nil {
            p.logger.Warn("failed to close hypergraph snapshot", zap.Error(err))
        }
    }

    return snapshotStore, release, nil
}
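
Editor's note: for orientation, a minimal caller-side sketch of the snapshot API added above. The wrapping function and variable names are assumptions for illustration; only the NewSnapshot signature comes from this commit.

    // Hypothetical read path: freeze a point-in-time view of the hypergraph
    // store, serve reads from it, and release the pebble snapshot when done.
    func readFromSnapshot(hStore *PebbleHypergraphStore) error {
        snap, release, err := hStore.NewSnapshot()
        if err != nil {
            return err // e.g. the store was not backed by a *PebbleDB
        }
        defer release()

        // Reads through snap observe a frozen view; concurrent writes to
        // hStore proceed independently and are not visible here.
        _ = snap
        return nil
    }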

type PebbleVertexDataIterator struct {
    i  store.Iterator
    db *PebbleHypergraphStore

@ -19,6 +19,10 @@ type PebbleDB struct {
    db *pebble.DB
}

func (p *PebbleDB) DB() *pebble.DB {
    return p.db
}

// pebbleMigrations contains ordered migration steps. New migrations append to
// the end.
var pebbleMigrations = []func(*pebble.Batch) error{
@ -163,8 +167,8 @@ func (p *PebbleDB) migrate(logger *zap.Logger) error {
    for i := int(storedVersion); i < len(pebbleMigrations); i++ {
        logger.Warn(
            "performing pebble store migration",
            zap.Int("from_version", int(storedVersion)),
            zap.Int("to_version", int(storedVersion+1)),
            zap.Int("from_version", int(i)),
            zap.Int("to_version", int(i+1)),
        )
        if err := pebbleMigrations[i](batch); err != nil {
            batch.Close()
@ -173,8 +177,8 @@ func (p *PebbleDB) migrate(logger *zap.Logger) error {
        }
        logger.Info(
            "migration step completed",
            zap.Int("from_version", int(storedVersion)),
            zap.Int("to_version", int(storedVersion+1)),
            zap.Int("from_version", int(i)),
            zap.Int("to_version", int(i+1)),
        )
    }

@ -325,6 +329,92 @@ func (t *PebbleTransaction) DeleteRange(

var _ store.Transaction = (*PebbleTransaction)(nil)

type pebbleSnapshotDB struct {
    snap *pebble.Snapshot
}

func (p *pebbleSnapshotDB) Get(key []byte) ([]byte, io.Closer, error) {
    return p.snap.Get(key)
}

func (p *pebbleSnapshotDB) Set(key, value []byte) error {
    return errors.New("pebble snapshot is read-only")
}

func (p *pebbleSnapshotDB) Delete(key []byte) error {
    return errors.New("pebble snapshot is read-only")
}

func (p *pebbleSnapshotDB) NewBatch(indexed bool) store.Transaction {
    return &snapshotTransaction{}
}

func (p *pebbleSnapshotDB) NewIter(lowerBound []byte, upperBound []byte) (
    store.Iterator,
    error,
) {
    return p.snap.NewIter(&pebble.IterOptions{
        LowerBound: lowerBound,
        UpperBound: upperBound,
    })
}

func (p *pebbleSnapshotDB) Compact(start, end []byte, parallelize bool) error {
    return errors.New("pebble snapshot is read-only")
}

func (p *pebbleSnapshotDB) Close() error {
    return p.snap.Close()
}

func (p *pebbleSnapshotDB) DeleteRange(start, end []byte) error {
    return errors.New("pebble snapshot is read-only")
}

func (p *pebbleSnapshotDB) CompactAll() error {
    return errors.New("pebble snapshot is read-only")
}

var _ store.KVDB = (*pebbleSnapshotDB)(nil)

type snapshotTransaction struct{}

func (s *snapshotTransaction) Get(key []byte) ([]byte, io.Closer, error) {
    return nil, nil, errors.New("pebble snapshot transaction is read-only")
}

func (s *snapshotTransaction) Set(key []byte, value []byte) error {
    return errors.New("pebble snapshot transaction is read-only")
}

func (s *snapshotTransaction) Commit() error {
    return errors.New("pebble snapshot transaction is read-only")
}

func (s *snapshotTransaction) Delete(key []byte) error {
    return errors.New("pebble snapshot transaction is read-only")
}

func (s *snapshotTransaction) Abort() error {
    return nil
}

func (s *snapshotTransaction) NewIter(
    lowerBound []byte,
    upperBound []byte,
) (store.Iterator, error) {
    return nil, errors.New("pebble snapshot transaction is read-only")
}

func (s *snapshotTransaction) DeleteRange(
    lowerBound []byte,
    upperBound []byte,
) error {
    return errors.New("pebble snapshot transaction is read-only")
}

var _ store.Transaction = (*snapshotTransaction)(nil)
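
Editor's note: the intended failure mode of this wrapper is that any write routed through a snapshot surfaces an explicit error rather than silently mutating shared state. A tiny hypothetical guard (not part of the commit) illustrating that fail-fast behaviour:

    // Hypothetical check: writes against a snapshot-backed KVDB must fail.
    func mustBeReadOnly(db store.KVDB) {
        if err := db.Set([]byte("k"), []byte("v")); err == nil {
            panic("expected snapshot-backed store to reject writes")
        }
    }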

func rightAlign(data []byte, size int) []byte {
    l := len(data)

@ -1396,9 +1396,11 @@ func (n *Nopthenticator) StreamInterceptor(
    info *grpc.StreamServerInfo,
    handler grpc.StreamHandler,
) error {
    bytes := make([]byte, 20)
    rand.Read(bytes)
    ss = &authenticatedStream{
        ServerStream: ss,
        ctx: qgrpc.NewContextWithPeerID(ss.Context(), peer.ID("peer")),
        ctx: qgrpc.NewContextWithPeerID(ss.Context(), peer.ID(bytes)),
    }
    return handler(srv, ss)
}
@ -1410,12 +1412,16 @@ func (n *Nopthenticator) UnaryInterceptor(
    info *grpc.UnaryServerInfo,
    handler grpc.UnaryHandler,
) (any, error) {
    return handler(qgrpc.NewContextWithPeerID(ctx, peer.ID("peer")), req)
    bytes := make([]byte, 20)
    rand.Read(bytes)
    return handler(qgrpc.NewContextWithPeerID(ctx, peer.ID(bytes)), req)
}

// Identify implements channel.AuthenticationProvider.
func (n *Nopthenticator) Identify(ctx context.Context) (peer.ID, error) {
    return peer.ID("peer"), nil
    bytes := make([]byte, 20)
    rand.Read(bytes)
    return peer.ID(bytes), nil
}

var _ channel.AuthenticationProvider = (*Nopthenticator)(nil)

@ -1441,15 +1441,15 @@ func (p *ProverConfirm) ToCanonicalBytes() ([]byte, error) {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

    // Write filter
    // Write deprecated field for filter
    if err := binary.Write(
        buf,
        binary.BigEndian,
        uint32(len(p.Filter)),
        uint32(32),
    ); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }
    if _, err := buf.Write(p.Filter); err != nil {
    if _, err := buf.Write(bytes.Repeat([]byte("reserved"), 4)); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

@ -1480,6 +1480,27 @@ func (p *ProverConfirm) ToCanonicalBytes() ([]byte, error) {
        }
    }

    // Write filters
    if err := binary.Write(
        buf,
        binary.BigEndian,
        uint32(len(p.Filters)),
    ); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }
    for _, f := range p.Filters {
        if err := binary.Write(
            buf,
            binary.BigEndian,
            uint32(len(f)),
        ); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
        if _, err := buf.Write(f); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
    }

    return buf.Bytes(), nil
}

@ -1543,6 +1564,40 @@ func (p *ProverConfirm) FromCanonicalBytes(data []byte) error {
        }
    }

    // Read filters
    filtersLen := uint32(0)
    if err := binary.Read(buf, binary.BigEndian, &filtersLen); err != nil {
        // Skip errors here, can be old messages
        return nil
    }

    if filtersLen > 100 {
        return errors.Wrap(
            errors.New("invalid filters length"),
            "from canonical bytes",
        )
    }

    p.Filters = make([][]byte, 0, filtersLen)
    for i := uint32(0); i < filtersLen; i++ {
        var filterLen uint32
        if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        if filterLen > 64 || filterLen < 32 {
            return errors.Wrap(
                errors.New("invalid filters length"),
                "from canonical bytes",
            )
        }

        filterBytes := make([]byte, filterLen)
        if _, err := buf.Read(filterBytes); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        p.Filters = append(p.Filters, filterBytes)
    }

    return nil
}
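
Editor's note: for reference, a standalone sketch of the trailing filters block these methods now append and parse: a big-endian uint32 count, then for each filter a uint32 length followed by the raw filter bytes. The helper name is an assumption; the real methods also carry the earlier fields and wrap every error.

    // Hypothetical encoder mirroring the wire layout of the filters block.
    // Errors from binary.Write are elided for brevity.
    func encodeFilters(filters [][]byte) []byte {
        buf := new(bytes.Buffer)
        binary.Write(buf, binary.BigEndian, uint32(len(filters)))
        for _, f := range filters {
            binary.Write(buf, binary.BigEndian, uint32(len(f)))
            buf.Write(f)
        }
        return buf.Bytes()
    }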

@ -1554,15 +1609,15 @@ func (p *ProverReject) ToCanonicalBytes() ([]byte, error) {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

    // Write filter
    // Write deprecated field for filter
    if err := binary.Write(
        buf,
        binary.BigEndian,
        uint32(len(p.Filter)),
        uint32(32),
    ); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }
    if _, err := buf.Write(p.Filter); err != nil {
    if _, err := buf.Write(bytes.Repeat([]byte("reserved"), 4)); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }

@ -1593,6 +1648,27 @@ func (p *ProverReject) ToCanonicalBytes() ([]byte, error) {
        }
    }

    // Write filters
    if err := binary.Write(
        buf,
        binary.BigEndian,
        uint32(len(p.Filters)),
    ); err != nil {
        return nil, errors.Wrap(err, "to canonical bytes")
    }
    for _, f := range p.Filters {
        if err := binary.Write(
            buf,
            binary.BigEndian,
            uint32(len(f)),
        ); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
        if _, err := buf.Write(f); err != nil {
            return nil, errors.Wrap(err, "to canonical bytes")
        }
    }

    return buf.Bytes(), nil
}

@ -1656,6 +1732,40 @@ func (p *ProverReject) FromCanonicalBytes(data []byte) error {
        }
    }

    // Read filters
    filtersLen := uint32(0)
    if err := binary.Read(buf, binary.BigEndian, &filtersLen); err != nil {
        // Skip errors here, can be old messages
        return nil
    }

    if filtersLen > 100 {
        return errors.Wrap(
            errors.New("invalid filters length"),
            "from canonical bytes",
        )
    }

    p.Filters = make([][]byte, 0, filtersLen)
    for i := uint32(0); i < filtersLen; i++ {
        var filterLen uint32
        if err := binary.Read(buf, binary.BigEndian, &filterLen); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        if filterLen > 64 || filterLen < 32 {
            return errors.Wrap(
                errors.New("invalid filters length"),
                "from canonical bytes",
            )
        }

        filterBytes := make([]byte, filterLen)
        if _, err := buf.Read(filterBytes); err != nil {
            return errors.Wrap(err, "from canonical bytes")
        }
        p.Filters = append(p.Filters, filterBytes)
    }

    return nil
}

@ -5015,6 +5125,11 @@ func (t *ProverConfirm) Validate() error {
    if err := t.PublicKeySignatureBls48581.Validate(); err != nil {
        return errors.Wrap(err, "public key signature")
    }
    for _, filter := range t.Filters {
        if len(filter) < 32 || len(filter) > 64 {
            return errors.Wrap(errors.New("invalid filter"), "validate")
        }
    }
    return nil
}

@ -5031,6 +5146,11 @@ func (t *ProverReject) Validate() error {
    if err := t.PublicKeySignatureBls48581.Validate(); err != nil {
        return errors.Wrap(err, "public key signature")
    }
    for _, filter := range t.Filters {
        if len(filter) < 32 || len(filter) > 64 {
            return errors.Wrap(errors.New("invalid filter"), "validate")
        }
    }
    return nil
}

File diff suppressed because it is too large

@ -50,9 +50,10 @@ message ProverResume {
}

message ProverConfirm {
  bytes filter = 1;
  bytes filter = 1 [deprecated = true];
  uint64 frame_number = 2;
  quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 3;
  repeated bytes filters = 4;
}

message ProverUpdate {
@ -70,11 +71,11 @@ message ProverKick {
  quilibrium.node.application.pb.TraversalProof traversal_proof = 7;
}

// ProverReject remains the same structure as before
message ProverReject {
  bytes filter = 1;
  bytes filter = 1 [deprecated = true];
  uint64 frame_number = 2;
  quilibrium.node.keys.pb.BLS48581AddressedSignature public_key_signature_bls48581 = 3;
  repeated bytes filters = 4;
}

message MessageRequest {

@ -1,31 +1,78 @@
package hypergraph

import (
    "sync"
    "sync/atomic"
    "time"
)

type SyncController struct {
    isSyncing atomic.Bool
    SyncStatus map[string]*SyncInfo
    globalSync atomic.Bool
    statusMu   sync.RWMutex
    syncStatus map[string]*SyncInfo
}

func (s *SyncController) TryEstablishSyncSession() bool {
    return !s.isSyncing.Swap(true)
func (s *SyncController) TryEstablishSyncSession(peerID string) bool {
    if peerID == "" {
        return !s.globalSync.Swap(true)
    }

    info := s.getOrCreate(peerID)
    return !info.inProgress.Swap(true)
}

func (s *SyncController) EndSyncSession() {
    s.isSyncing.Store(false)
func (s *SyncController) EndSyncSession(peerID string) {
    if peerID == "" {
        s.globalSync.Store(false)
        return
    }

    s.statusMu.RLock()
    info := s.syncStatus[peerID]
    s.statusMu.RUnlock()
    if info != nil {
        info.inProgress.Store(false)
    }
}

func (s *SyncController) GetStatus(peerID string) (*SyncInfo, bool) {
    s.statusMu.RLock()
    defer s.statusMu.RUnlock()
    info, ok := s.syncStatus[peerID]
    return info, ok
}

func (s *SyncController) SetStatus(peerID string, info *SyncInfo) {
    s.statusMu.Lock()
    existing := s.syncStatus[peerID]
    if existing == nil {
        s.syncStatus[peerID] = info
    } else {
        existing.Unreachable = info.Unreachable
        existing.LastSynced = info.LastSynced
    }
    s.statusMu.Unlock()
}

func (s *SyncController) getOrCreate(peerID string) *SyncInfo {
    s.statusMu.Lock()
    defer s.statusMu.Unlock()
    info, ok := s.syncStatus[peerID]
    if !ok {
        info = &SyncInfo{}
        s.syncStatus[peerID] = info
    }
    return info
}

type SyncInfo struct {
    Unreachable bool
    LastSynced  time.Time
    inProgress  atomic.Bool
}

func NewSyncController() *SyncController {
    return &SyncController{
        isSyncing: atomic.Bool{},
        SyncStatus: map[string]*SyncInfo{},
        syncStatus: map[string]*SyncInfo{},
    }
}
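
Editor's note: a minimal caller-side sketch of the per-peer session guard introduced above, assuming the usual imports (errors, time). The function name, peerID, and the doSync callback are placeholders; an empty peerID falls back to the old single global session.

    // Hypothetical sync entry point guarded by the per-peer controller.
    func syncWithPeer(sc *SyncController, peerID string, doSync func() error) error {
        if !sc.TryEstablishSyncSession(peerID) {
            return errors.New("sync already in progress for peer")
        }
        defer sc.EndSyncSession(peerID)

        err := doSync()
        // Record the outcome so later scheduling can skip unreachable peers.
        sc.SetStatus(peerID, &SyncInfo{
            Unreachable: err != nil,
            LastSynced:  time.Now(),
        })
        return err
    }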

@ -528,6 +528,7 @@ type TreeBackingStore interface {
        shardAddress []byte,
    ) ([]byte, error)
    GetRootCommits(frameNumber uint64) (map[ShardKey][][]byte, error)
    NewSnapshot() (TreeBackingStore, func(), error)
}

// LazyVectorCommitmentTree is a lazy-loaded (from a TreeBackingStore based
@ -564,6 +565,74 @@ func (t *LazyVectorCommitmentTree) PruneUncoveredBranches() error {
    )
}

func (t *LazyVectorCommitmentTree) CloneWithStore(
    store TreeBackingStore,
) *LazyVectorCommitmentTree {
    if t == nil {
        return nil
    }

    t.treeMx.RLock()
    defer t.treeMx.RUnlock()

    clone := *t
    clone.Store = store
    clone.Root = cloneLazyNode(t.Root, store)
    return &clone
}

func cloneLazyNode(
    node LazyVectorCommitmentNode,
    store TreeBackingStore,
) LazyVectorCommitmentNode {
    if node == nil {
        return nil
    }

    switch n := node.(type) {
    case *LazyVectorCommitmentLeafNode:
        leaf := *n
        if n.Key != nil {
            leaf.Key = slices.Clone(n.Key)
        }
        if n.Value != nil {
            leaf.Value = slices.Clone(n.Value)
        }
        if n.HashTarget != nil {
            leaf.HashTarget = slices.Clone(n.HashTarget)
        }
        if n.Commitment != nil {
            leaf.Commitment = slices.Clone(n.Commitment)
        }
        if n.Size != nil {
            leaf.Size = new(big.Int).Set(n.Size)
        }
        leaf.Store = store
        return &leaf
    case *LazyVectorCommitmentBranchNode:
        branch := *n
        if n.Prefix != nil {
            branch.Prefix = slices.Clone(n.Prefix)
        }
        if n.FullPrefix != nil {
            branch.FullPrefix = slices.Clone(n.FullPrefix)
        }
        if n.Commitment != nil {
            branch.Commitment = slices.Clone(n.Commitment)
        }
        if n.Size != nil {
            branch.Size = new(big.Int).Set(n.Size)
        }
        for i := range branch.Children {
            branch.Children[i] = cloneLazyNode(n.Children[i], store)
        }
        branch.Store = store
        return &branch
    default:
        return nil
    }
}
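
Editor's note: tying the pieces together, one plausible way (names below are assumptions) the hypergraph can use this during hypersync is to clone the live tree while rebinding every node to a read-only snapshot store, so readers keep a stable view while the live tree continues to accept writes.

    // Hypothetical helper: rebind a live tree to a frozen snapshot store.
    func snapshotTree(
        liveStore TreeBackingStore,
        liveTree *LazyVectorCommitmentTree,
    ) (*LazyVectorCommitmentTree, func(), error) {
        snapStore, release, err := liveStore.NewSnapshot()
        if err != nil {
            return nil, nil, err
        }

        // The clone deep-copies keys, values, and commitments, and points
        // every node's Store at the snapshot, so lazy loads also hit the
        // frozen view.
        return liveTree.CloneWithStore(snapStore), release, nil
    }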

// InsertBranchSkeleton writes a branch node at fullPrefix with the given
// metadata. prefix is the compressed prefix stored in the node, commitment
// should be the source tree’s commitment for this branch node. size, leafCount,