fix: tree deletion issue

This commit is contained in:
Cassandra Heart 2026-01-25 18:17:02 -06:00
parent ca08b25937
commit 4d0e4f8b96
No known key found for this signature in database
GPG Key ID: 371083BFA6C240AA
6 changed files with 1031 additions and 186 deletions

View File

@ -648,11 +648,12 @@ func (hg *HypergraphCRDT) syncSubtree(
) error {
tree := localSet.GetTree()
// Get local commitment at same path to check if subtrees match
// Get local node at same path
var localCommitment []byte
var localNode tries.LazyVectorCommitmentNode
if tree != nil && tree.Root != nil {
path := toIntSlice(serverBranch.FullPath)
localNode := getNodeAtPath(
localNode = getNodeAtPath(
logger,
tree.SetType,
tree.PhaseType,
@ -672,7 +673,7 @@ func (hg *HypergraphCRDT) syncSubtree(
}
}
// If commitments match, subtrees are identical - no sync needed
// If commitments match, subtrees are identical
if bytes.Equal(localCommitment, serverBranch.Commitment) {
logger.Debug("subtree matches",
zap.String("path", hex.EncodeToString(packPath(serverBranch.FullPath))),
@ -680,15 +681,101 @@ func (hg *HypergraphCRDT) syncSubtree(
return nil
}
// Commitments don't match - fetch all leaves from server.
// This is simpler and more reliable than branch-by-branch comparison,
// ensuring we get the complete correct state from the server.
logger.Debug("subtree mismatch, fetching all leaves from server",
zap.String("path", hex.EncodeToString(packPath(serverBranch.FullPath))),
zap.String("localCommitment", hex.EncodeToString(localCommitment)),
zap.String("serverCommitment", hex.EncodeToString(serverBranch.Commitment)),
)
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
// If server node is a leaf or has no children, fetch all leaves
if serverBranch.IsLeaf || len(serverBranch.Children) == 0 {
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
}
// If we have NO local data at this path, fetch all leaves directly.
// This avoids N round trips for N children when we need all of them anyway.
if localNode == nil {
logger.Debug("no local data at path, fetching all leaves directly",
zap.String("path", hex.EncodeToString(packPath(serverBranch.FullPath))),
zap.Int("serverChildren", len(serverBranch.Children)),
)
return hg.fetchAndIntegrateLeaves(stream, shardKey, phaseSet, expectedRoot, serverBranch.FullPath, localSet, logger)
}
// Compare children and recurse
localChildren := make(map[int32][]byte)
if tree != nil && tree.Root != nil {
path := toIntSlice(serverBranch.FullPath)
if branch, ok := localNode.(*tries.LazyVectorCommitmentBranchNode); ok {
for i := 0; i < 64; i++ {
child := branch.Children[i]
if child == nil {
child, _ = branch.Store.GetNodeByPath(
tree.SetType,
tree.PhaseType,
tree.ShardKey,
slices.Concat(path, []int{i}),
)
}
if child != nil {
childPath := slices.Concat(path, []int{i})
child = ensureCommittedNode(logger, tree, childPath, child)
switch c := child.(type) {
case *tries.LazyVectorCommitmentBranchNode:
localChildren[int32(i)] = c.Commitment
case *tries.LazyVectorCommitmentLeafNode:
localChildren[int32(i)] = c.Commitment
}
}
}
}
}
for _, serverChild := range serverBranch.Children {
localChildCommit := localChildren[serverChild.Index]
if bytes.Equal(localChildCommit, serverChild.Commitment) {
// Child matches, skip
continue
}
// Need to sync this child
childPath := append(slices.Clone(serverBranch.FullPath), serverChild.Index)
// Query for child branch
err := stream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKey,
PhaseSet: phaseSet,
Path: childPath,
ExpectedRoot: expectedRoot,
},
},
})
if err != nil {
return errors.Wrap(err, "send GetBranch for child")
}
resp, err := stream.Recv()
if err != nil {
return errors.Wrap(err, "receive GetBranch response for child")
}
if errResp := resp.GetError(); errResp != nil {
logger.Warn("error getting child branch",
zap.String("error", errResp.Message),
zap.String("path", hex.EncodeToString(packPath(childPath))),
)
continue
}
childBranch := resp.GetBranch()
if childBranch == nil {
continue
}
// Recurse
if err := hg.syncSubtree(stream, shardKey, phaseSet, expectedRoot, childBranch, localSet, logger); err != nil {
return err
}
}
return nil
}
func (hg *HypergraphCRDT) fetchAndIntegrateLeaves(

View File

@ -6,8 +6,10 @@ package crypto
import (
"bytes"
"crypto/rand"
"fmt"
"math/big"
mrand "math/rand"
"slices"
"testing"
"go.uber.org/zap"
@ -21,10 +23,18 @@ import (
// This test requires native code integration to be useful
var verEncr = verenc.NewMPCitHVerifiableEncryptor(1)
// testConfig builds a config for tests: an in-memory database so no
// state is persisted to disk between runs.
func testConfig() *config.Config {
	dbCfg := &config.DBConfig{
		InMemoryDONOTUSE: true,
		Path:             ".configtest/store",
	}
	return &config.Config{DB: dbCfg}
}
func BenchmarkLazyVectorCommitmentTreeInsert(b *testing.B) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
store := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
store := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: store, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
addresses := [][]byte{}
@ -42,8 +52,9 @@ func BenchmarkLazyVectorCommitmentTreeInsert(b *testing.B) {
func BenchmarkLazyVectorCommitmentTreeCommit(b *testing.B) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
store := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
store := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: store, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
addresses := [][]byte{}
@ -61,8 +72,9 @@ func BenchmarkLazyVectorCommitmentTreeCommit(b *testing.B) {
func BenchmarkLazyVectorCommitmentTreeProve(b *testing.B) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
store := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
store := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: store, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
addresses := [][]byte{}
@ -81,8 +93,9 @@ func BenchmarkLazyVectorCommitmentTreeProve(b *testing.B) {
func BenchmarkLazyVectorCommitmentTreeVerify(b *testing.B) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
store := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
store := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: store, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
addresses := [][]byte{}
@ -105,8 +118,9 @@ func BenchmarkLazyVectorCommitmentTreeVerify(b *testing.B) {
func TestLazyVectorCommitmentTrees(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
s := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Test single insert
@ -136,8 +150,9 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg = testConfig()
db = store.NewPebbleDB(l, cfg, 0)
s = store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree = &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Test get on empty tree
@ -163,8 +178,9 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg = testConfig()
db = store.NewPebbleDB(l, cfg, 0)
s = store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree = &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Test delete on empty tree
@ -193,8 +209,9 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg = testConfig()
db = store.NewPebbleDB(l, cfg, 0)
s = store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree = &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Insert keys that share common prefix
@ -251,8 +268,9 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg = testConfig()
db = store.NewPebbleDB(l, cfg, 0)
s = store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree = &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Empty tree should be empty
@ -286,8 +304,9 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg = testConfig()
db = store.NewPebbleDB(l, cfg, 0)
s = store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree = &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
cmptree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -429,8 +448,9 @@ func TestLazyVectorCommitmentTrees(t *testing.T) {
// make previous proofs invalid.
func TestTreeLeafReaddition(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
s := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Generate 1000 random 64-byte keys and corresponding values
@ -510,8 +530,9 @@ func TestTreeLeafReaddition(t *testing.T) {
// but proofs still work after recommitting the tree.
func TestTreeRemoveReaddLeaf(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
s := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Generate 1000 random 64-byte keys and corresponding values
@ -626,8 +647,9 @@ func TestTreeRemoveReaddLeaf(t *testing.T) {
// correct.
func TestTreeLongestBranch(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
s := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Test with an empty tree
@ -798,8 +820,9 @@ func TestTreeLongestBranch(t *testing.T) {
// adding and removing leaves that cause branch creation due to shared prefixes.
func TestTreeBranchStructure(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
cfg := testConfig()
db := store.NewPebbleDB(l, cfg, 0)
s := store.NewPebbleHypergraphStore(cfg.DB, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
// Create three base keys with 64-byte size
@ -831,7 +854,8 @@ func TestTreeBranchStructure(t *testing.T) {
// Commit the initial state
initialRoot := tree.Commit(false)
initialSize := tree.GetSize()
// Copy the size value to avoid aliasing (GetSize returns pointer to internal big.Int)
initialSize := new(big.Int).Set(tree.GetSize())
// Confirm initial state
if initialSize.Cmp(big.NewInt(3)) != 0 {
@ -1056,7 +1080,7 @@ func TestNonLazyProveVerify(t *testing.T) {
func TestDeleteLeafPromotion(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -1162,7 +1186,7 @@ func TestDeleteLeafPromotion(t *testing.T) {
func TestDeleteBranchPromotion(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -1320,7 +1344,7 @@ func TestDeleteWithLazyLoadedBranches(t *testing.T) {
l, _ := zap.NewProduction()
// First tree: insert data and commit to storage
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree1 := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -1478,7 +1502,7 @@ func TestDeleteWithLazyLoadedBranches(t *testing.T) {
func TestDeleteBranchCollapse(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -1552,13 +1576,72 @@ func TestDeleteBranchCollapse(t *testing.T) {
}
}
// compareTreeBranches walks two trees in parallel and logs structural
// differences to help diagnose mismatched root commitments.
//
// name1/name2 label the two nodes in log output, and depth controls the
// log indentation. The walk reports nil-ness mismatches, branch/leaf type
// mismatches, and Prefix/FullPrefix differences, then recurses into every
// child slot populated in either branch.
func compareTreeBranches(t *testing.T, name1 string, node1 crypto.LazyVectorCommitmentNode, name2 string, node2 crypto.LazyVectorCommitmentNode, depth int) {
	indent := ""
	for i := 0; i < depth; i++ {
		indent += " "
	}
	if node1 == nil && node2 == nil {
		return
	}
	if node1 == nil {
		t.Logf("%s%s is nil but %s is not", indent, name1, name2)
		return
	}
	if node2 == nil {
		t.Logf("%s%s is nil but %s is not", indent, name2, name1)
		return
	}
	b1, ok1 := node1.(*crypto.LazyVectorCommitmentBranchNode)
	b2, ok2 := node2.(*crypto.LazyVectorCommitmentBranchNode)
	if ok1 != ok2 {
		t.Logf("%sType mismatch: %s is branch=%v, %s is branch=%v", indent, name1, ok1, name2, ok2)
		return
	}
	if !ok1 {
		// Both are leaves; nothing structural left to compare.
		return
	}
	// Compare Prefix. (The previous version also computed FullPrefix
	// equality here, but that term never affected whether the Prefix
	// mismatch was logged — the two checks are independent.)
	if !slices.Equal(b1.Prefix, b2.Prefix) {
		t.Logf("%sPrefix mismatch at depth %d:", indent, depth)
		t.Logf("%s %s.Prefix = %v (len=%d)", indent, name1, b1.Prefix, len(b1.Prefix))
		t.Logf("%s %s.Prefix = %v (len=%d)", indent, name2, b2.Prefix, len(b2.Prefix))
	}
	// Compare FullPrefix (evaluated once, not twice as before).
	if !slices.Equal(b1.FullPrefix, b2.FullPrefix) {
		t.Logf("%sFullPrefix mismatch at depth %d:", indent, depth)
		t.Logf("%s %s.FullPrefix = %v", indent, name1, b1.FullPrefix)
		t.Logf("%s %s.FullPrefix = %v", indent, name2, b2.FullPrefix)
	}
	// Recurse into any child slot populated in either branch so that a
	// child present in only one tree is still reported (as a nil mismatch).
	for i := 0; i < 64; i++ {
		c1, c2 := b1.Children[i], b2.Children[i]
		if c1 != nil || c2 != nil {
			compareTreeBranches(t, fmt.Sprintf("%s.Child[%d]", name1, i), c1, fmt.Sprintf("%s.Child[%d]", name2, i), c2, depth+1)
		}
	}
}
// TestDeleteDeepNestedPrefixes tests deletion in a tree with deeply nested
// branch prefixes, ensuring prefix merging works correctly.
// Uses 5000 keys organized into groups with very long shared prefixes.
func TestDeleteDeepNestedPrefixes(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -1609,6 +1692,11 @@ func TestDeleteDeepNestedPrefixes(t *testing.T) {
leaves1, depth1 := tree.GetMetadata()
t.Logf("Initial tree: %d leaves, longest branch: %d", leaves1, depth1)
// Debug: check initial root Prefix
if rootBranch, ok := tree.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
t.Logf("Initial root: Prefix=%v, FullPrefix=%v", rootBranch.Prefix, rootBranch.FullPrefix)
}
// Delete all keys from half the groups
// This exercises prefix merging as groups collapse
deletedGroups := numGroups / 2
@ -1662,6 +1750,11 @@ func TestDeleteDeepNestedPrefixes(t *testing.T) {
leaves2, depth2 := tree.GetMetadata()
t.Logf("After deletion: %d leaves, longest branch: %d", leaves2, depth2)
// Debug: check root Prefix before re-insert
if rootBranch, ok := tree.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
t.Logf("Root before re-insert: Prefix=%v, FullPrefix=%v", rootBranch.Prefix, rootBranch.FullPrefix)
}
// Now re-insert deleted keys and verify tree matches original
for g := 0; g < deletedGroups; g++ {
start := groupBoundaries[g]
@ -1677,6 +1770,32 @@ func TestDeleteDeepNestedPrefixes(t *testing.T) {
leaves3, depth3 := tree.GetMetadata()
t.Logf("After re-insert: %d leaves, longest branch: %d", leaves3, depth3)
// Debug: check final root Prefix
if rootBranch, ok := tree.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
t.Logf("Final root: Prefix=%v, FullPrefix=%v", rootBranch.Prefix, rootBranch.FullPrefix)
}
// Build a fresh tree with all keys to compare structure
db2 := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store2"}}, 0)
s2 := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db2, l, verEncr, bls48581.NewKZGInclusionProver(l))
freshTree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s2, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
for i, key := range keys {
if err := freshTree.Insert(nil, key, values[i], nil, big.NewInt(1)); err != nil {
t.Fatalf("Failed to insert key %d in fresh tree: %v", i, err)
}
}
rootFresh := freshTree.Commit(false)
t.Logf("Fresh tree root: %x", rootFresh[:16])
// Compare re-inserted tree to fresh tree
if !bytes.Equal(root3, rootFresh) {
t.Logf("Re-inserted tree differs from fresh tree!")
t.Logf(" Re-inserted: %x", root3[:16])
t.Logf(" Fresh: %x", rootFresh[:16])
// Walk both trees to find differences
compareTreeBranches(t, "restored", tree.Root, "fresh", freshTree.Root, 0)
}
// The tree structure should be equivalent (same root commitment)
if !bytes.Equal(root1, root3) {
t.Fatalf("Root mismatch after delete-and-reinsert cycle\nOriginal: %x\nRestored: %x", root1, root3)
@ -1701,7 +1820,7 @@ func TestDeleteDeepNestedPrefixes(t *testing.T) {
func TestDeleteMultipleChildrenRemaining(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{InclusionProver: bls48581.NewKZGInclusionProver(l), Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: crypto.ShardKey{}}
@ -1834,7 +1953,7 @@ func TestDeleteMultipleChildrenRemaining(t *testing.T) {
func TestDeleteBranchPromotionFullPrefixBug(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{
InclusionProver: bls48581.NewKZGInclusionProver(l),
@ -2056,7 +2175,7 @@ func TestDeleteBranchPromotionFullPrefixBug(t *testing.T) {
func TestDeleteBranchPromotionDeepNesting(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
tree := &crypto.LazyVectorCommitmentTree{
InclusionProver: bls48581.NewKZGInclusionProver(l),
@ -2220,7 +2339,7 @@ func TestDeleteBranchPromotionDeepNesting(t *testing.T) {
func TestBranchPromotionPathIndexCorruption(t *testing.T) {
bls48581.Init()
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/pathidx"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/pathidx"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
// Create initial tree

View File

@ -21,7 +21,7 @@ var verencr = &mocks.MockVerifiableEncryptor{}
func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verencr, nil)
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -52,7 +52,7 @@ func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -79,7 +79,7 @@ func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -109,7 +109,7 @@ func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -167,7 +167,7 @@ func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
}
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -181,7 +181,7 @@ func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
tree.Delete(nil, []byte("key1"))
l, _ = zap.NewProduction()
db = store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
cmptree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -312,7 +312,7 @@ func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
// increase the Size metadata
func TestTreeLeafReadditionNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -368,7 +368,7 @@ func TestTreeLeafReadditionNoBLS(t *testing.T) {
// decreases and increases the size metadata appropriately
func TestTreeRemoveReaddLeafNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -437,7 +437,7 @@ func TestTreeRemoveReaddLeafNoBLS(t *testing.T) {
// correct.
func TestTreeLongestBranchNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -593,7 +593,7 @@ func TestTreeLongestBranchNoBLS(t *testing.T) {
// where branch merging occurs during deletion.
func TestTreeNoStaleNodesAfterDeleteNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
shardKey := tries.ShardKey{}
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: shardKey}
@ -710,7 +710,7 @@ func TestTreeNoStaleNodesAfterDeleteNoBLS(t *testing.T) {
// This tests the FullPrefix update bug hypothesis.
func TestTreeNoStaleNodesAfterBranchMergeNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
shardKey := tries.ShardKey{}
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: shardKey}
@ -807,7 +807,7 @@ func TestTreeNoStaleNodesAfterBranchMergeNoBLS(t *testing.T) {
// TestTreeNoStaleNodesAfterMassDelete tests stale node detection with many keys
func TestTreeNoStaleNodesAfterMassDeleteNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
shardKey := tries.ShardKey{}
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: shardKey}
@ -935,7 +935,7 @@ func countReachableNodes(t *testing.T, tree *tries.LazyVectorCommitmentTree) int
// adding and removing leaves that cause branch creation due to shared prefixes.
func TestTreeBranchStructureNoBLS(t *testing.T) {
l, _ := zap.NewProduction()
db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
@ -966,7 +966,8 @@ func TestTreeBranchStructureNoBLS(t *testing.T) {
}
}
initialSize := tree.GetSize()
// Copy the size value to avoid aliasing (GetSize returns pointer to internal big.Int)
initialSize := new(big.Int).Set(tree.GetSize())
// Confirm initial state
if initialSize.Cmp(big.NewInt(3)) != 0 {

View File

@ -15,6 +15,7 @@ import (
"net"
"os"
"slices"
"strings"
"sync"
"testing"
"time"
@ -2759,9 +2760,9 @@ func TestMainnetBlossomsubFrameReceptionAndHypersync(t *testing.T) {
}
peerInfoMu.Unlock()
peerIdStr := peer.ID(peerInfoMsg.PeerId).String()
t.Logf("Received peer info for %s with %d reachability entries",
peerIdStr, len(reachability))
// peerIdStr := peer.ID(peerInfoMsg.PeerId).String()
// t.Logf("Received peer info for %s with %d reachability entries",
// peerIdStr, len(reachability))
case protobufs.KeyRegistryType:
keyRegistry := &protobufs.KeyRegistry{}
@ -2803,8 +2804,8 @@ func TestMainnetBlossomsubFrameReceptionAndHypersync(t *testing.T) {
keyRegistryMap[string(proverAddress)] = identityPeerID
keyRegistryMu.Unlock()
t.Logf("Received key registry: prover %x -> peer %s",
proverAddress, identityPeerID.String())
// t.Logf("Received key registry: prover %x -> peer %s",
// proverAddress, identityPeerID.String())
}
return nil
@ -3109,6 +3110,70 @@ waitLoop:
client := protobufs.NewHypergraphComparisonServiceClient(conn)
// First, query the server's root commitment to verify what it claims to have
t.Log("Querying server's root commitment before sync...")
{
diagStream, err := client.PerformSync(context.Background())
require.NoError(t, err)
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: []int32{},
},
},
})
require.NoError(t, err)
resp, err := diagStream.Recv()
require.NoError(t, err)
if errResp := resp.GetError(); errResp != nil {
t.Logf("Server error on root query: %s", errResp.Message)
} else if branch := resp.GetBranch(); branch != nil {
t.Logf("Server root commitment: %x", branch.Commitment)
t.Logf("Server root path: %v", branch.FullPath)
t.Logf("Server root isLeaf: %v", branch.IsLeaf)
t.Logf("Server root children count: %d", len(branch.Children))
t.Logf("Server root leafCount: %d", branch.LeafCount)
t.Logf("Frame expected root: %x", expectedRoot)
if !bytes.Equal(branch.Commitment, expectedRoot) {
t.Logf("WARNING: Server root commitment does NOT match frame expected root!")
} else {
t.Logf("OK: Server root commitment matches frame expected root")
}
// Log each child's commitment
for _, child := range branch.Children {
t.Logf(" Server child[%d]: commitment=%x", child.Index, child.Commitment)
}
// Drill into child[37] specifically to compare
child37Path := append(slices.Clone(branch.FullPath), 37)
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: child37Path,
},
},
})
if err == nil {
resp37, err := diagStream.Recv()
if err == nil {
if b37 := resp37.GetBranch(); b37 != nil {
t.Logf("Server child[37] details: path=%v, leafCount=%d, isLeaf=%v, childrenCount=%d",
b37.FullPath, b37.LeafCount, b37.IsLeaf, len(b37.Children))
}
}
}
}
_ = diagStream.CloseSend()
}
// Perform hypersync on all phases
t.Log("Performing hypersync on prover shard...")
@ -3139,27 +3204,541 @@ waitLoop:
t.Logf("Client prover root after sync: %x", clientProverRoot)
t.Logf("Expected prover root from frame: %x", expectedRoot)
// Diagnostic: show client tree structure
clientTreeForDiag := clientHG.GetVertexAddsSet(proverShardKey).GetTree()
if clientTreeForDiag != nil && clientTreeForDiag.Root != nil {
switch n := clientTreeForDiag.Root.(type) {
case *tries.LazyVectorCommitmentBranchNode:
t.Logf("Client root is BRANCH: path=%v, commitment=%x, leafCount=%d", n.FullPrefix, n.Commitment, n.LeafCount)
childCount := 0
for i := 0; i < 64; i++ {
if n.Children[i] != nil {
childCount++
child := n.Children[i]
switch c := child.(type) {
case *tries.LazyVectorCommitmentBranchNode:
t.Logf(" Client child[%d]: BRANCH commitment=%x, leafCount=%d", i, c.Commitment, c.LeafCount)
case *tries.LazyVectorCommitmentLeafNode:
t.Logf(" Client child[%d]: LEAF commitment=%x", i, c.Commitment)
}
}
}
t.Logf("Client root in-memory children: %d", childCount)
case *tries.LazyVectorCommitmentLeafNode:
t.Logf("Client root is LEAF: key=%x, commitment=%x", n.Key, n.Commitment)
}
} else {
t.Logf("Client tree root is nil")
}
// Deep dive into child[37] - get server leaves to compare
t.Log("=== Deep dive into child[37] ===")
var serverChild37Leaves []*protobufs.LeafData
{
diagStream, err := client.PerformSync(context.Background())
if err != nil {
t.Logf("Failed to create diag stream: %v", err)
} else {
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
// Correct path: root is at [...60], child[37] is at [...60, 37]
child37Path := []int32{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37}
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetLeaves{
GetLeaves: &protobufs.HypergraphSyncGetLeavesRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: child37Path,
MaxLeaves: 1000,
},
},
})
if err != nil {
t.Logf("Failed to send GetLeaves request: %v", err)
} else {
resp, err := diagStream.Recv()
if err != nil {
t.Logf("Failed to receive GetLeaves response: %v", err)
} else if errResp := resp.GetError(); errResp != nil {
t.Logf("Server returned error: %s", errResp.Message)
} else if leaves := resp.GetLeaves(); leaves != nil {
serverChild37Leaves = leaves.Leaves
t.Logf("Server child[37] leaves: count=%d, total=%d", len(leaves.Leaves), leaves.TotalLeaves)
// Show first few leaf keys
for i, leaf := range leaves.Leaves {
if i < 5 {
t.Logf(" Server leaf[%d]: key=%x (len=%d)", i, leaf.Key[:min(32, len(leaf.Key))], len(leaf.Key))
}
}
if len(leaves.Leaves) > 5 {
t.Logf(" ... and %d more leaves", len(leaves.Leaves)-5)
}
} else {
t.Logf("Server returned unexpected response type")
}
}
_ = diagStream.CloseSend()
}
}
// Get all client leaves and compare with server child[37] leaves
clientTree := clientHG.GetVertexAddsSet(proverShardKey).GetTree()
allClientLeaves := tries.GetAllLeaves(
clientTree.SetType,
clientTree.PhaseType,
clientTree.ShardKey,
clientTree.Root,
)
t.Logf("Total client leaves: %d", len(allClientLeaves))
// Build map of client leaf keys -> values
clientLeafMap := make(map[string][]byte)
for _, leaf := range allClientLeaves {
if leaf != nil {
clientLeafMap[string(leaf.Key)] = leaf.Value
}
}
// Check which server child[37] leaves are in client and compare values
if len(serverChild37Leaves) > 0 {
found := 0
missing := 0
valueMismatch := 0
for _, serverLeaf := range serverChild37Leaves {
clientValue, exists := clientLeafMap[string(serverLeaf.Key)]
if !exists {
if missing < 3 {
t.Logf(" Missing server leaf: key=%x", serverLeaf.Key[:min(32, len(serverLeaf.Key))])
}
missing++
} else {
found++
if !bytes.Equal(clientValue, serverLeaf.Value) {
if valueMismatch < 5 {
t.Logf(" VALUE MISMATCH for key=%x: serverLen=%d, clientLen=%d",
serverLeaf.Key[:min(32, len(serverLeaf.Key))],
len(serverLeaf.Value), len(clientValue))
t.Logf(" Server value prefix: %x", serverLeaf.Value[:min(64, len(serverLeaf.Value))])
t.Logf(" Client value prefix: %x", clientValue[:min(64, len(clientValue))])
}
valueMismatch++
}
}
}
t.Logf("Server child[37] leaves in client: found=%d, missing=%d, valueMismatch=%d", found, missing, valueMismatch)
}
// Compare branch structure for child[37]
t.Log("=== Comparing branch structure for child[37] ===")
{
// Query server's child[37] branch info
diagStream, err := client.PerformSync(context.Background())
if err == nil {
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
child37Path := []int32{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37}
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: child37Path,
},
},
})
if err == nil {
resp, err := diagStream.Recv()
if err == nil {
if branch := resp.GetBranch(); branch != nil {
t.Logf("Server child[37] branch: path=%v, commitment=%x, children=%d",
branch.FullPath, branch.Commitment[:min(32, len(branch.Commitment))], len(branch.Children))
// Show first few children with their commitments
for i, child := range branch.Children {
if i < 10 {
t.Logf(" Server sub-child[%d]: commitment=%x", child.Index, child.Commitment[:min(32, len(child.Commitment))])
}
}
// Now check client's child[37] branch structure
if clientTree != nil && clientTree.Root != nil {
if rootBranch, ok := clientTree.Root.(*tries.LazyVectorCommitmentBranchNode); ok {
if child37 := rootBranch.Children[37]; child37 != nil {
if clientChild37Branch, ok := child37.(*tries.LazyVectorCommitmentBranchNode); ok {
t.Logf("Client child[37] branch: path=%v, commitment=%x, leafCount=%d",
clientChild37Branch.FullPrefix, clientChild37Branch.Commitment[:min(32, len(clientChild37Branch.Commitment))], clientChild37Branch.LeafCount)
// Count and show client's children
clientChildCount := 0
for i := 0; i < 64; i++ {
if clientChild37Branch.Children[i] != nil {
if clientChildCount < 10 {
switch c := clientChild37Branch.Children[i].(type) {
case *tries.LazyVectorCommitmentBranchNode:
t.Logf(" Client sub-child[%d]: BRANCH commitment=%x", i, c.Commitment[:min(32, len(c.Commitment))])
case *tries.LazyVectorCommitmentLeafNode:
t.Logf(" Client sub-child[%d]: LEAF commitment=%x", i, c.Commitment[:min(32, len(c.Commitment))])
}
}
clientChildCount++
}
}
t.Logf("Client child[37] has %d in-memory children, server has %d", clientChildCount, len(branch.Children))
} else if clientChild37Leaf, ok := child37.(*tries.LazyVectorCommitmentLeafNode); ok {
t.Logf("Client child[37] is LEAF: key=%x, commitment=%x",
clientChild37Leaf.Key[:min(32, len(clientChild37Leaf.Key))], clientChild37Leaf.Commitment[:min(32, len(clientChild37Leaf.Commitment))])
}
} else {
t.Logf("Client has NO child at index 37")
}
}
}
}
}
}
_ = diagStream.CloseSend()
}
}
// Recursive comparison function to drill into mismatches
var recursiveCompare func(path []int32, depth int)
recursiveCompare = func(path []int32, depth int) {
if depth > 10 {
t.Logf("DEPTH LIMIT REACHED at path=%v", path)
return
}
indent := strings.Repeat(" ", depth)
// Get server branch at path
diagStream, err := client.PerformSync(context.Background())
if err != nil {
t.Logf("%sERROR creating stream: %v", indent, err)
return
}
defer diagStream.CloseSend()
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetBranch{
GetBranch: &protobufs.HypergraphSyncGetBranchRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: path,
},
},
})
if err != nil {
t.Logf("%sERROR sending request: %v", indent, err)
return
}
resp, err := diagStream.Recv()
if err != nil {
t.Logf("%sERROR receiving response: %v", indent, err)
return
}
if errResp := resp.GetError(); errResp != nil {
t.Logf("%sSERVER ERROR: %s", indent, errResp.Message)
return
}
serverBranch := resp.GetBranch()
if serverBranch == nil {
t.Logf("%sNO BRANCH in response", indent)
return
}
t.Logf("%sSERVER: path=%v, fullPath=%v, leafCount=%d, children=%d, isLeaf=%v",
indent, path, serverBranch.FullPath, serverBranch.LeafCount,
len(serverBranch.Children), serverBranch.IsLeaf)
t.Logf("%sSERVER commitment: %x", indent, serverBranch.Commitment[:min(48, len(serverBranch.Commitment))])
// Get corresponding client node - convert []int32 to []int
pathInt := make([]int, len(path))
for i, p := range path {
pathInt[i] = int(p)
}
clientNode, err := clientTree.GetByPath(pathInt)
if err != nil {
t.Logf("%sERROR getting client node: %v", indent, err)
return
}
if clientNode == nil {
t.Logf("%sCLIENT: NO NODE at path=%v", indent, path)
return
}
switch cn := clientNode.(type) {
case *tries.LazyVectorCommitmentBranchNode:
t.Logf("%sCLIENT: path=%v, fullPrefix=%v, leafCount=%d, commitment=%x",
indent, path, cn.FullPrefix, cn.LeafCount, cn.Commitment[:min(48, len(cn.Commitment))])
// Check if server is leaf but client is branch
if serverBranch.IsLeaf {
t.Logf("%s*** TYPE MISMATCH: server is LEAF, client is BRANCH ***", indent)
t.Logf("%s SERVER: fullPath=%v, isLeaf=%v, commitment=%x",
indent, serverBranch.FullPath, serverBranch.IsLeaf, serverBranch.Commitment[:min(48, len(serverBranch.Commitment))])
return
}
// Check if FullPath differs from FullPrefix
serverPathStr := fmt.Sprintf("%v", serverBranch.FullPath)
clientPathStr := fmt.Sprintf("%v", cn.FullPrefix)
if serverPathStr != clientPathStr {
t.Logf("%s*** PATH MISMATCH: server fullPath=%v, client fullPrefix=%v ***",
indent, serverBranch.FullPath, cn.FullPrefix)
}
// Check commitment match
if !bytes.Equal(serverBranch.Commitment, cn.Commitment) {
t.Logf("%s*** COMMITMENT MISMATCH ***", indent)
// Compare children
serverChildren := make(map[int32][]byte)
for _, sc := range serverBranch.Children {
serverChildren[sc.Index] = sc.Commitment
}
for i := int32(0); i < 64; i++ {
serverCommit := serverChildren[i]
var clientCommit []byte
clientChild := cn.Children[i]
// Lazy-load client child from store if needed
if clientChild == nil && len(serverCommit) > 0 {
childPathInt := make([]int, len(cn.FullPrefix)+1)
for j, p := range cn.FullPrefix {
childPathInt[j] = p
}
childPathInt[len(cn.FullPrefix)] = int(i)
clientChild, _ = clientTree.Store.GetNodeByPath(
clientTree.SetType,
clientTree.PhaseType,
clientTree.ShardKey,
childPathInt,
)
}
if clientChild != nil {
switch cc := clientChild.(type) {
case *tries.LazyVectorCommitmentBranchNode:
clientCommit = cc.Commitment
case *tries.LazyVectorCommitmentLeafNode:
clientCommit = cc.Commitment
}
}
if len(serverCommit) > 0 || len(clientCommit) > 0 {
if !bytes.Equal(serverCommit, clientCommit) {
t.Logf("%s CHILD[%d] MISMATCH: server=%x, client=%x",
indent, i,
serverCommit[:min(24, len(serverCommit))],
clientCommit[:min(24, len(clientCommit))])
// Recurse into mismatched child
childPath := append(slices.Clone(serverBranch.FullPath), i)
recursiveCompare(childPath, depth+1)
}
}
}
}
case *tries.LazyVectorCommitmentLeafNode:
t.Logf("%sCLIENT: LEAF key=%x, commitment=%x",
indent, cn.Key[:min(32, len(cn.Key))], cn.Commitment[:min(48, len(cn.Commitment))])
t.Logf("%sCLIENT LEAF DETAIL: fullKey=%x, value len=%d",
indent, cn.Key, len(cn.Value))
// Compare with server commitment
if serverBranch.IsLeaf {
if !bytes.Equal(serverBranch.Commitment, cn.Commitment) {
t.Logf("%s*** LEAF COMMITMENT MISMATCH ***", indent)
t.Logf("%s SERVER commitment: %x", indent, serverBranch.Commitment)
t.Logf("%s CLIENT commitment: %x", indent, cn.Commitment)
t.Logf("%s SERVER fullPath: %v", indent, serverBranch.FullPath)
// The key in LazyVectorCommitmentLeafNode doesn't have a "fullPrefix" directly -
// the path is determined by the key bytes
}
} else {
t.Logf("%s*** TYPE MISMATCH: server is branch, client is leaf ***", indent)
}
}
}
// Start recursive comparison at root
t.Log("=== RECURSIVE MISMATCH ANALYSIS ===")
rootPath := []int32{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60}
recursiveCompare(rootPath, 0)
// Now let's drill into the specific mismatched subtree to see the leaves
t.Log("=== LEAF-LEVEL ANALYSIS for [...60 37 1 50] ===")
{
// Get server leaves under this subtree
diagStream, err := client.PerformSync(context.Background())
if err == nil {
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
mismatchPath := []int32{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37, 1, 50}
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetLeaves{
GetLeaves: &protobufs.HypergraphSyncGetLeavesRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: mismatchPath,
MaxLeaves: 100,
},
},
})
if err == nil {
resp, err := diagStream.Recv()
if err == nil {
if leaves := resp.GetLeaves(); leaves != nil {
t.Logf("SERVER leaves under [...60 37 1 50]: count=%d, total=%d",
len(leaves.Leaves), leaves.TotalLeaves)
for i, leaf := range leaves.Leaves {
t.Logf(" SERVER leaf[%d]: key=%x", i, leaf.Key)
}
}
}
}
_ = diagStream.CloseSend()
}
// Get client leaves under this subtree
clientTree = clientHG.GetVertexAddsSet(proverShardKey).GetTree()
mismatchPathInt := []int{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37, 1, 50}
clientSubtreeNode, err := clientTree.GetByPath(mismatchPathInt)
if err != nil {
t.Logf("CLIENT error getting node at [...60 37 1 50]: %v", err)
} else if clientSubtreeNode != nil {
clientSubtreeLeaves := tries.GetAllLeaves(
clientTree.SetType,
clientTree.PhaseType,
clientTree.ShardKey,
clientSubtreeNode,
)
t.Logf("CLIENT leaves under [...60 37 1 50]: count=%d", len(clientSubtreeLeaves))
for i, leaf := range clientSubtreeLeaves {
if leaf != nil {
t.Logf(" CLIENT leaf[%d]: key=%x", i, leaf.Key)
}
}
}
}
// Check the deeper path [...60 37 1 50 50] which server claims has leafCount=2
t.Log("=== LEAF-LEVEL ANALYSIS for [...60 37 1 50 50] ===")
{
diagStream, err := client.PerformSync(context.Background())
if err == nil {
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
deepPath := []int32{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37, 1, 50, 50}
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetLeaves{
GetLeaves: &protobufs.HypergraphSyncGetLeavesRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: deepPath,
MaxLeaves: 100,
},
},
})
if err == nil {
resp, err := diagStream.Recv()
if err == nil {
if leaves := resp.GetLeaves(); leaves != nil {
t.Logf("SERVER leaves under [...60 37 1 50 50]: count=%d, total=%d",
len(leaves.Leaves), leaves.TotalLeaves)
for i, leaf := range leaves.Leaves {
t.Logf(" SERVER leaf[%d]: key=%x", i, leaf.Key)
}
} else if errResp := resp.GetError(); errResp != nil {
t.Logf("SERVER error for [...60 37 1 50 50]: %s", errResp.Message)
}
}
}
_ = diagStream.CloseSend()
}
}
// Also check path [...60 37 1] to see the 3 vs 3 children issue
t.Log("=== LEAF-LEVEL ANALYSIS for [...60 37 1] ===")
{
diagStream, err := client.PerformSync(context.Background())
if err == nil {
shardKeyBytes := slices.Concat(proverShardKey.L1[:], proverShardKey.L2[:])
path371 := []int32{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37, 1}
err = diagStream.Send(&protobufs.HypergraphSyncQuery{
Request: &protobufs.HypergraphSyncQuery_GetLeaves{
GetLeaves: &protobufs.HypergraphSyncGetLeavesRequest{
ShardKey: shardKeyBytes,
PhaseSet: protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS,
Path: path371,
MaxLeaves: 100,
},
},
})
if err == nil {
resp, err := diagStream.Recv()
if err == nil {
if leaves := resp.GetLeaves(); leaves != nil {
t.Logf("SERVER leaves under [...60 37 1]: count=%d, total=%d",
len(leaves.Leaves), leaves.TotalLeaves)
for i, leaf := range leaves.Leaves {
t.Logf(" SERVER leaf[%d]: key=%x", i, leaf.Key)
}
}
}
}
_ = diagStream.CloseSend()
}
// Client leaves under [...60 37 1]
clientTree = clientHG.GetVertexAddsSet(proverShardKey).GetTree()
path371Int := []int{63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 60, 37, 1}
clientNode371, err := clientTree.GetByPath(path371Int)
if err != nil {
t.Logf("CLIENT error getting node at [...60 37 1]: %v", err)
} else if clientNode371 != nil {
clientLeaves371 := tries.GetAllLeaves(
clientTree.SetType,
clientTree.PhaseType,
clientTree.ShardKey,
clientNode371,
)
t.Logf("CLIENT leaves under [...60 37 1]: count=%d", len(clientLeaves371))
for i, leaf := range clientLeaves371 {
if leaf != nil {
t.Logf(" CLIENT leaf[%d]: key=%x", i, leaf.Key)
}
}
}
}
assert.Equal(t, expectedRoot, clientProverRoot,
"client prover root should match frame's prover tree commitment after hypersync")
// Count vertices synced
clientTree := clientHG.GetVertexAddsSet(proverShardKey).GetTree()
clientLeaves := tries.GetAllLeaves(
clientTree = clientHG.GetVertexAddsSet(proverShardKey).GetTree()
clientLeaves2 := tries.GetAllLeaves(
clientTree.SetType,
clientTree.PhaseType,
clientTree.ShardKey,
clientTree.Root,
)
clientLeafCount := 0
for _, leaf := range clientLeaves {
clientLeafCount2 := 0
for _, leaf := range clientLeaves2 {
if leaf != nil {
clientLeafCount++
clientLeafCount2++
}
}
t.Logf("Hypersync complete: client synced %d prover vertices", clientLeafCount)
assert.Greater(t, clientLeafCount, 0, "should have synced at least some prover vertices")
t.Logf("Hypersync complete: client synced %d prover vertices", clientLeafCount2)
assert.Greater(t, clientLeafCount2, 0, "should have synced at least some prover vertices")
// Verify the sync-based repair approach:
// 1. Create a second in-memory hypergraph

View File

@ -93,6 +93,7 @@ var pebbleMigrations = []func(*pebble.Batch, *pebble.DB, *config.Config) error{
migration_2_1_0_1817,
migration_2_1_0_1818,
migration_2_1_0_1819,
migration_2_1_0_1820,
}
func NewPebbleDB(
@ -1136,6 +1137,10 @@ func migration_2_1_0_1819(b *pebble.Batch, db *pebble.DB, cfg *config.Config) er
return migration_2_1_0_18(b, db, cfg)
}
func migration_2_1_0_1820(b *pebble.Batch, db *pebble.DB, cfg *config.Config) error {
return doMigration1818(db, cfg)
}
// pebbleBatchDB wraps a *pebble.Batch to implement store.KVDB for use in migrations
type pebbleBatchDB struct {
b *pebble.Batch

View File

@ -1021,24 +1021,17 @@ func (t *LazyVectorCommitmentTree) Insert(
[]int{expectedNibble},
n.Prefix,
)
// Note: Relocation not needed in Insert's branch split case because
// the branch keeps its absolute position. Children are at paths
// relative to n.FullPrefix which doesn't change (only the Prefix gets split).
err = t.Store.InsertNode(
txn,
t.SetType,
t.PhaseType,
t.ShardKey,
generateKeyFromPath(slices.Concat(
path,
newBranch.Prefix,
[]int{expectedNibble},
n.Prefix,
)),
slices.Concat(
path,
newBranch.Prefix,
[]int{expectedNibble},
n.Prefix,
),
generateKeyFromPath(n.FullPrefix),
n.FullPrefix,
newBranch.Children[expectedNibble],
)
if err != nil {
@ -1924,7 +1917,9 @@ func (t *LazyVectorCommitmentTree) GetSize() *big.Int {
return t.Root.GetSize()
}
// Delete removes a key-value pair from the tree
// Delete removes a key-value pair from the tree.
// This is the inverse of Insert - when a branch is left with only one child,
// we merge it back (the reverse of Insert's branch split operation).
func (t *LazyVectorCommitmentTree) Delete(
txn TreeBackingStoreTransaction,
key []byte,
@ -1935,6 +1930,8 @@ func (t *LazyVectorCommitmentTree) Delete(
return errors.New("empty key not allowed")
}
// remove returns (sizeRemoved, newNode)
// newNode is nil if the node was deleted, otherwise the updated node
var remove func(
node LazyVectorCommitmentNode,
depth int,
@ -1946,6 +1943,7 @@ func (t *LazyVectorCommitmentTree) Delete(
depth int,
path []int,
) (*big.Int, LazyVectorCommitmentNode) {
// Lazy load if needed
if node == nil {
var err error
node, err = t.Store.GetNodeByPath(
@ -1964,8 +1962,8 @@ func (t *LazyVectorCommitmentTree) Delete(
switch n := node.(type) {
case *LazyVectorCommitmentLeafNode:
// Base case: found the leaf to delete
if bytes.Equal(n.Key, key) {
// Delete the node from storage
err := t.Store.DeleteNode(
txn,
t.SetType,
@ -1975,13 +1973,15 @@ func (t *LazyVectorCommitmentTree) Delete(
GetFullPath(key),
)
if err != nil {
log.Panic("failed to delete path", zap.Error(err))
log.Panic("failed to delete leaf", zap.Error(err))
}
return n.Size, nil
}
// Key doesn't match - nothing to delete
return big.NewInt(0), n
case *LazyVectorCommitmentBranchNode:
// Ensure branch is fully loaded
if !n.FullyLoaded {
for i := 0; i < BranchNodes; i++ {
var err error
@ -1998,30 +1998,43 @@ func (t *LazyVectorCommitmentTree) Delete(
n.FullyLoaded = true
}
// Check if key matches the prefix
for i, expectedNibble := range n.Prefix {
currentNibble := getNextNibble(key, depth+i*BranchBits)
if currentNibble != expectedNibble {
actualNibble := getNextNibble(key, depth+i*BranchBits)
if actualNibble != expectedNibble {
// Key doesn't match prefix - nothing to delete here
return big.NewInt(0), n
}
}
finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
newPath := slices.Concat(path, n.Prefix, []int{finalNibble})
// Key matches prefix, find the child nibble
childNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
childPath := slices.Concat(n.FullPrefix, []int{childNibble})
var size *big.Int
size, n.Children[finalNibble] = remove(
n.Children[finalNibble],
// Recursively delete from child
sizeRemoved, newChild := remove(
n.Children[childNibble],
depth+len(n.Prefix)*BranchBits+BranchBits,
newPath,
childPath,
)
if sizeRemoved.Cmp(big.NewInt(0)) == 0 {
// Nothing was deleted
return big.NewInt(0), n
}
// Update the child
n.Children[childNibble] = newChild
n.Commitment = nil
// Count remaining children and gather metadata
childCount := 0
var lastChild LazyVectorCommitmentNode
var lastChildIndex int
longestBranch := 1
leaves := 0
longestBranch := 0
leafCount := 0
totalSize := big.NewInt(0)
for i, child := range n.Children {
if child != nil {
childCount++
@ -2029,20 +2042,24 @@ func (t *LazyVectorCommitmentTree) Delete(
lastChildIndex = i
switch c := child.(type) {
case *LazyVectorCommitmentBranchNode:
leaves += c.LeafCount
if longestBranch < c.LongestBranch+1 {
leafCount += c.LeafCount
if c.LongestBranch+1 > longestBranch {
longestBranch = c.LongestBranch + 1
}
totalSize = totalSize.Add(totalSize, c.Size)
case *LazyVectorCommitmentLeafNode:
leaves += 1
leafCount++
if longestBranch < 1 {
longestBranch = 1
}
totalSize = totalSize.Add(totalSize, c.Size)
}
}
}
var retNode LazyVectorCommitmentNode
switch childCount {
case 0:
// Delete this node from storage
// No children left - delete this branch entirely
err := t.Store.DeleteNode(
txn,
t.SetType,
@ -2052,87 +2069,21 @@ func (t *LazyVectorCommitmentTree) Delete(
n.FullPrefix,
)
if err != nil {
log.Panic("failed to delete path", zap.Error(err))
log.Panic("failed to delete empty branch", zap.Error(err))
}
retNode = nil
return sizeRemoved, nil
case 1:
// Identify the child's original path to prevent orphaned storage entries
originalChildPath := slices.Concat(n.FullPrefix, []int{lastChildIndex})
// Only one child left - merge this branch with the child
// This is the REVERSE of Insert's branch split operation
return t.mergeBranchWithChild(txn, n, lastChild, lastChildIndex, path, sizeRemoved)
if childBranch, ok := lastChild.(*LazyVectorCommitmentBranchNode); ok {
// Merge this node's prefix with the child's prefix
mergedPrefix := []int{}
mergedPrefix = append(mergedPrefix, n.Prefix...)
mergedPrefix = append(mergedPrefix, lastChildIndex)
mergedPrefix = append(mergedPrefix, childBranch.Prefix...)
childBranch.Prefix = mergedPrefix
// Note: We do NOT update FullPrefix because children are stored
// relative to the branch's FullPrefix. If we updated FullPrefix,
// child lookups would compute wrong paths and fail.
// The FullPrefix remains at the old value for child path compatibility.
childBranch.Commitment = nil
// Delete the child from its original path to prevent orphan
_ = t.Store.DeleteNode(
txn,
t.SetType,
t.PhaseType,
t.ShardKey,
generateKeyFromPath(originalChildPath),
originalChildPath,
)
// Delete this node (parent) from storage
err := t.Store.DeleteNode(
txn,
t.SetType,
t.PhaseType,
t.ShardKey,
generateKeyFromPath(n.FullPrefix),
n.FullPrefix,
)
if err != nil {
log.Panic("failed to delete path", zap.Error(err))
}
// Insert the merged child at this path
err = t.Store.InsertNode(
txn,
t.SetType,
t.PhaseType,
t.ShardKey,
generateKeyFromPath(n.FullPrefix),
n.FullPrefix,
childBranch,
)
if err != nil {
log.Panic("failed to insert node", zap.Error(err))
}
retNode = childBranch
} else if leafChild, ok := lastChild.(*LazyVectorCommitmentLeafNode); ok {
// Delete this node from storage
err := t.Store.DeleteNode(
txn,
t.SetType,
t.PhaseType,
t.ShardKey,
generateKeyFromPath(n.FullPrefix),
n.FullPrefix,
)
if err != nil {
log.Panic("failed to delete path", zap.Error(err))
}
retNode = leafChild
}
default:
// Multiple children remain - just update metadata
n.LeafCount = leafCount
n.LongestBranch = longestBranch
n.LeafCount = leaves
n.Size = n.Size.Sub(n.Size, size)
n.Size = totalSize
// Update this node in storage
err := t.Store.InsertNode(
txn,
t.SetType,
@ -2143,13 +2094,11 @@ func (t *LazyVectorCommitmentTree) Delete(
n,
)
if err != nil {
log.Panic("failed to insert node", zap.Error(err))
log.Panic("failed to update branch", zap.Error(err))
}
retNode = n
return sizeRemoved, n
}
return size, retNode
default:
return big.NewInt(0), node
}
@ -2164,6 +2113,111 @@ func (t *LazyVectorCommitmentTree) Delete(
), "delete")
}
// mergeBranchWithChild merges a branch node with its only remaining child.
// This is the reverse of Insert's branch split operation.
//
// When Insert splits a branch/leaf, it creates:
//   - A new branch at path with prefix[:splitPoint]
//   - The old node as a child with the remaining prefix
//
// When Delete leaves only one child, we reverse this:
//   - If the child is a leaf: the branch simply disappears and the leaf
//     (still keyed by its own key) takes its place.
//   - If the child is a branch: the prefixes are merged and the child takes
//     this branch's place in storage.
//
// sizeRemoved is passed through unchanged; the second return value is the
// node that now occupies the deleted branch's position in the tree. Store
// failures are treated as fatal (log.Panic), matching Delete's handling.
func (t *LazyVectorCommitmentTree) mergeBranchWithChild(
	txn TreeBackingStoreTransaction,
	branch *LazyVectorCommitmentBranchNode,
	child LazyVectorCommitmentNode,
	childIndex int,
	parentPath []int, // path to the branch (not including branch.Prefix)
	sizeRemoved *big.Int,
) (*big.Int, LazyVectorCommitmentNode) {
	switch c := child.(type) {
	case *LazyVectorCommitmentLeafNode:
		// Child is a leaf - the branch simply disappears. The leaf stays at
		// its current location (keyed by c.Key); we only need to delete the
		// branch node itself from storage.
		err := t.Store.DeleteNode(
			txn,
			t.SetType,
			t.PhaseType,
			t.ShardKey,
			generateKeyFromPath(branch.FullPrefix),
			branch.FullPrefix,
		)
		if err != nil {
			log.Panic("failed to delete branch during leaf merge", zap.Error(err))
		}
		return sizeRemoved, c
	case *LazyVectorCommitmentBranchNode:
		// Child is a branch - merge prefixes:
		// new prefix = branch.Prefix + childIndex + child.Prefix.
		// slices.Concat matches how paths are assembled elsewhere in this file.
		mergedPrefix := slices.Concat(branch.Prefix, []int{childIndex}, c.Prefix)

		// The merged branch will be at parentPath with the merged prefix, so
		// its FullPrefix = parentPath + mergedPrefix. Note this equals the
		// child's old FullPrefix (parentPath + branch.Prefix + [childIndex] +
		// c.Prefix), so the child's children - stored relative to
		// c.FullPrefix - keep their absolute positions.
		newFullPrefix := slices.Concat(parentPath, mergedPrefix)
		oldFullPrefix := c.FullPrefix

		// Delete the old parent branch node.
		err := t.Store.DeleteNode(
			txn,
			t.SetType,
			t.PhaseType,
			t.ShardKey,
			generateKeyFromPath(branch.FullPrefix),
			branch.FullPrefix,
		)
		if err != nil {
			log.Panic("failed to delete parent branch during merge", zap.Error(err))
		}

		// Delete the child from its old location before re-inserting it with
		// the merged metadata.
		err = t.Store.DeleteNode(
			txn,
			t.SetType,
			t.PhaseType,
			t.ShardKey,
			generateKeyFromPath(oldFullPrefix),
			oldFullPrefix,
		)
		if err != nil {
			log.Panic("failed to delete child branch during merge", zap.Error(err))
		}

		// Update the child branch's prefix and FullPrefix; the commitment is
		// invalidated so it will be recomputed for the merged shape.
		c.Prefix = mergedPrefix
		c.FullPrefix = newFullPrefix
		c.Commitment = nil

		// Insert the merged child at the parent's location.
		err = t.Store.InsertNode(
			txn,
			t.SetType,
			t.PhaseType,
			t.ShardKey,
			generateKeyFromPath(newFullPrefix),
			newFullPrefix,
			c,
		)
		if err != nil {
			log.Panic("failed to insert merged branch", zap.Error(err))
		}
		return sizeRemoved, c
	default:
		// Unknown node type: leave the child in place untouched. This is a
		// defensive fallthrough; no store mutation happens here.
		return sizeRemoved, child
	}
}
func SerializeTree(tree *LazyVectorCommitmentTree) ([]byte, error) {
tree.treeMx.Lock()
defer tree.treeMx.Unlock()