migration to recalculate
parent bc827eda3f
commit 289533f16f
@@ -1821,6 +1821,563 @@ func TestDeleteMultipleChildrenRemaining(t *testing.T) {
	t.Logf("Delete tree matches fresh tree with same remaining keys")
}

// TestDeleteBranchPromotionFullPrefixBug tests that when a delete operation
// triggers branch promotion (where a parent branch is replaced by its only
// remaining child branch), the child's FullPrefix is correctly updated to
// reflect its new position in the tree.
//
// The bug: In lazy_proof_tree.go Delete(), when case 1 (single child remaining)
// handles a branch child, it updates childBranch.Prefix but NOT childBranch.FullPrefix.
// The node is then stored at the parent's path (n.FullPrefix), but the stored data
// contains the OLD childBranch.FullPrefix. When loaded later, the node has a wrong
// FullPrefix, causing child lookups and commitment computation to fail.
func TestDeleteBranchPromotionFullPrefixBug(t *testing.T) {
	bls48581.Init()
	l, _ := zap.NewProduction()
	db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
	s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
	tree := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "adds",
		ShardKey:        crypto.ShardKey{},
	}

	// Create a specific structure that will trigger branch promotion:
	//
	//        Root Branch (prefix=[])
	//        /          \
	//   Loner1          SubBranch (prefix=[X,Y,Z])
	//                    /        \
	//                 Key1        Key2
	//
	// When we delete Loner1, SubBranch should be promoted to root with its
	// prefix merged. The bug is that SubBranch.FullPrefix is not updated.

	// All keys start with 0xAA to share initial path
	// Loner diverges early (at nibble 2)
	// SubBranch keys share a longer prefix and diverge later

	// Loner key: 0xAA 0x00 ... (diverges at nibble 2 with value 0)
	lonerKey := make([]byte, 64)
	lonerKey[0] = 0xAA
	lonerKey[1] = 0x00 // This makes nibbles [10, 10, 0, 0, ...]
	rand.Read(lonerKey[2:])

	// SubBranch keys: 0xAA 0xFF ... (diverge at nibble 2 with value F)
	// Key1 and Key2 share more prefix and diverge at byte 32
	key1 := make([]byte, 64)
	key1[0] = 0xAA
	key1[1] = 0xFF // Nibbles [10, 10, 15, 15, ...]
	for i := 2; i < 32; i++ {
		key1[i] = 0xBB // Shared prefix within sub-branch
	}
	key1[32] = 0x00 // Key1 diverges here
	rand.Read(key1[33:])

	key2 := make([]byte, 64)
	copy(key2, key1)
	key2[32] = 0xFF // Key2 diverges here differently
	rand.Read(key2[33:])

	// Insert all keys
	err := tree.Insert(nil, lonerKey, lonerKey, nil, big.NewInt(1))
	if err != nil {
		t.Fatalf("Failed to insert loner key: %v", err)
	}
	err = tree.Insert(nil, key1, key1, nil, big.NewInt(1))
	if err != nil {
		t.Fatalf("Failed to insert key1: %v", err)
	}
	err = tree.Insert(nil, key2, key2, nil, big.NewInt(1))
	if err != nil {
		t.Fatalf("Failed to insert key2: %v", err)
	}

	// Commit to persist everything
	rootBefore := tree.Commit(false)
	t.Logf("Root before deletion: %x", rootBefore[:16])

	// Verify all keys exist
	if _, err := tree.Get(lonerKey); err != nil {
		t.Fatalf("Loner key not found before deletion: %v", err)
	}
	if _, err := tree.Get(key1); err != nil {
		t.Fatalf("Key1 not found before deletion: %v", err)
	}
	if _, err := tree.Get(key2); err != nil {
		t.Fatalf("Key2 not found before deletion: %v", err)
	}

	// Delete the loner - this triggers branch promotion for SubBranch
	err = tree.Delete(nil, lonerKey)
	if err != nil {
		t.Fatalf("Failed to delete loner key: %v", err)
	}

	// Verify loner is gone
	if _, err := tree.Get(lonerKey); err == nil {
		t.Fatalf("Loner key still exists after deletion")
	}

	// At this point, the in-memory tree should still work because
	// the node references are still valid (even if FullPrefix is wrong)
	val1, err := tree.Get(key1)
	if err != nil {
		t.Fatalf("Key1 not found after deletion (in-memory): %v", err)
	}
	if !bytes.Equal(val1, key1) {
		t.Fatalf("Key1 value corrupted after deletion")
	}

	val2, err := tree.Get(key2)
	if err != nil {
		t.Fatalf("Key2 not found after deletion (in-memory): %v", err)
	}
	if !bytes.Equal(val2, key2) {
		t.Fatalf("Key2 value corrupted after deletion")
	}

	// Commit after deletion
	rootAfterDelete := tree.Commit(false)
	t.Logf("Root after deletion: %x", rootAfterDelete[:16])

	// Now create a FRESH tree that loads from storage.
	// This is the critical test - if FullPrefix is wrong in storage,
	// the fresh tree will have issues.
	tree2 := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "adds",
		ShardKey:        crypto.ShardKey{},
	}

	// Load root from storage
	rootNode, err := s.GetNodeByPath("vertex", "adds", crypto.ShardKey{}, []int{})
	if err != nil {
		t.Fatalf("Failed to load root from storage: %v", err)
	}
	tree2.Root = rootNode

	// Try to get key1 from the fresh tree.
	// If the FullPrefix bug exists, this may fail because child lookups use wrong paths.
	val1Fresh, err := tree2.Get(key1)
	if err != nil {
		t.Fatalf("Key1 not found in fresh tree loaded from storage: %v", err)
	}
	if !bytes.Equal(val1Fresh, key1) {
		t.Fatalf("Key1 value wrong in fresh tree")
	}

	val2Fresh, err := tree2.Get(key2)
	if err != nil {
		t.Fatalf("Key2 not found in fresh tree loaded from storage: %v", err)
	}
	if !bytes.Equal(val2Fresh, key2) {
		t.Fatalf("Key2 value wrong in fresh tree")
	}

	// Commit the fresh tree and compare roots
	rootFresh := tree2.Commit(false)
	t.Logf("Root from fresh tree: %x", rootFresh[:16])

	if !bytes.Equal(rootAfterDelete, rootFresh) {
		t.Fatalf("Root mismatch! In-memory tree produced different root than fresh tree loaded from storage\n"+
			"In-memory: %x\n"+
			"Fresh: %x\n"+
			"This indicates FullPrefix corruption during branch promotion",
			rootAfterDelete, rootFresh)
	}

	// Also compare against a completely fresh tree built from scratch with same keys
	tree3 := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "scratch",
		ShardKey:        crypto.ShardKey{},
	}

	err = tree3.Insert(nil, key1, key1, nil, big.NewInt(1))
	if err != nil {
		t.Fatalf("Failed to insert key1 into scratch tree: %v", err)
	}
	err = tree3.Insert(nil, key2, key2, nil, big.NewInt(1))
	if err != nil {
		t.Fatalf("Failed to insert key2 into scratch tree: %v", err)
	}

	rootScratch := tree3.Commit(false)
	t.Logf("Root from scratch tree: %x", rootScratch[:16])

	// Log tree structures for debugging
	if branch, ok := tree.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
		t.Logf("After-delete tree root: Prefix=%v, FullPrefix=%v", branch.Prefix, branch.FullPrefix)
		for i, child := range branch.Children {
			if child != nil {
				switch c := child.(type) {
				case *crypto.LazyVectorCommitmentBranchNode:
					t.Logf("  After-delete child[%d]: Branch Prefix=%v, FullPrefix=%v", i, c.Prefix, c.FullPrefix)
				case *crypto.LazyVectorCommitmentLeafNode:
					t.Logf("  After-delete child[%d]: Leaf Key=%x...", i, c.Key[:8])
				}
			}
		}
	}
	if branch, ok := tree3.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
		t.Logf("Scratch tree root: Prefix=%v, FullPrefix=%v", branch.Prefix, branch.FullPrefix)
		for i, child := range branch.Children {
			if child != nil {
				switch c := child.(type) {
				case *crypto.LazyVectorCommitmentBranchNode:
					t.Logf("  Scratch child[%d]: Branch Prefix=%v, FullPrefix=%v", i, c.Prefix, c.FullPrefix)
				case *crypto.LazyVectorCommitmentLeafNode:
					t.Logf("  Scratch child[%d]: Leaf Key=%x...", i, c.Key[:8])
				}
			}
		}
	}

	if !bytes.Equal(rootAfterDelete, rootScratch) {
		t.Fatalf("Root mismatch! Delete-promoted tree produced different root than scratch tree\n"+
			"After delete: %x\n"+
			"From scratch: %x\n"+
			"This indicates structural difference after branch promotion",
			rootAfterDelete, rootScratch)
	}

	t.Log("All roots match - branch promotion preserved correct tree structure")
}
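
The doc comment above pins the failure to the single-child promotion path in Delete(): the promoted child's Prefix gets merged with its parent's, but its FullPrefix is left pointing at the old position, so the node is persisted at the parent's path while carrying stale path data. The following is only an editorial sketch of the invariant the fix has to preserve; the branch struct and promote helper below are hypothetical stand-ins, not the repository's LazyVectorCommitmentBranchNode or its actual Delete() code.

package main

import "fmt"

// Minimal stand-in for a branch node: Prefix is relative to the parent,
// FullPrefix is the absolute path from the root (assumed semantics, taken
// from the test comments above).
type branch struct {
	Prefix     []int
	FullPrefix []int
}

// promote replaces parent with its only remaining child branch. The child's
// FullPrefix must be rewritten to the parent's; otherwise the node is stored
// at the parent's path while its serialized FullPrefix still points elsewhere.
func promote(parent, child *branch) *branch {
	child.Prefix = append(append([]int{}, parent.Prefix...), child.Prefix...)
	child.FullPrefix = append([]int{}, parent.FullPrefix...) // the missing update
	return child
}

func main() {
	root := &branch{Prefix: []int{}, FullPrefix: []int{}}
	sub := &branch{Prefix: []int{2, 0}, FullPrefix: []int{2, 0}}

	promoted := promote(root, sub)
	// The promoted node now lives at the root's path, and its FullPrefix agrees.
	fmt.Println(promoted.Prefix, promoted.FullPrefix) // [2 0] []
}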

// TestDeleteBranchPromotionDeepNesting tests branch promotion with deeply nested
// structures where multiple levels of promotion may occur.
func TestDeleteBranchPromotionDeepNesting(t *testing.T) {
	bls48581.Init()
	l, _ := zap.NewProduction()
	db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, 0)
	s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))
	tree := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "deep",
		ShardKey:        crypto.ShardKey{},
	}

	// Create a chain of nested branches, each with a loner and a sub-branch.
	// When we delete all loners from outermost to innermost, we trigger
	// multiple successive branch promotions.

	// Structure:
	//   Root
	//   |-- Loner0
	//   |-- Branch1
	//       |-- Loner1
	//       |-- Branch2
	//           |-- Loner2
	//           |-- Branch3
	//               |-- Key1
	//               |-- Key2

	// Create keys with progressively longer shared prefixes
	numLoners := 5
	loners := make([][]byte, numLoners)

	// Base prefix that all keys share
	basePrefix := []byte{0xAA, 0xBB, 0xCC, 0xDD}

	for i := 0; i < numLoners; i++ {
		loner := make([]byte, 64)
		copy(loner, basePrefix)
		// Each loner diverges at a different depth.
		// Loner i diverges at byte 4+i with value 0x00.
		for j := 4; j < 4+i; j++ {
			loner[j] = 0xFF // Shared with sub-branch up to this point
		}
		loner[4+i] = 0x00 // Diverges here
		rand.Read(loner[5+i:])
		loners[i] = loner
	}

	// Final keys share the longest prefix and diverge at the end
	key1 := make([]byte, 64)
	copy(key1, basePrefix)
	for i := 4; i < 32; i++ {
		key1[i] = 0xFF
	}
	key1[32] = 0x11
	rand.Read(key1[33:])

	key2 := make([]byte, 64)
	copy(key2, key1)
	key2[32] = 0x22
	rand.Read(key2[33:])

	// Insert all keys
	for i, loner := range loners {
		if err := tree.Insert(nil, loner, loner, nil, big.NewInt(1)); err != nil {
			t.Fatalf("Failed to insert loner %d: %v", i, err)
		}
	}
	if err := tree.Insert(nil, key1, key1, nil, big.NewInt(1)); err != nil {
		t.Fatalf("Failed to insert key1: %v", err)
	}
	if err := tree.Insert(nil, key2, key2, nil, big.NewInt(1)); err != nil {
		t.Fatalf("Failed to insert key2: %v", err)
	}

	initialRoot := tree.Commit(false)
	leaves, depth := tree.GetMetadata()
	t.Logf("Initial tree: %d leaves, depth %d", leaves, depth)

	// Delete loners from outermost to innermost.
	// Each deletion should trigger branch promotion.
	for i := 0; i < numLoners; i++ {
		if err := tree.Delete(nil, loners[i]); err != nil {
			t.Fatalf("Failed to delete loner %d: %v", i, err)
		}

		// After each deletion, verify remaining keys are accessible
		if _, err := tree.Get(key1); err != nil {
			t.Fatalf("Key1 not accessible after deleting loner %d: %v", i, err)
		}
		if _, err := tree.Get(key2); err != nil {
			t.Fatalf("Key2 not accessible after deleting loner %d: %v", i, err)
		}

		// Commit and check structure
		root := tree.Commit(false)
		t.Logf("After deleting loner %d, root: %x", i, root[:8])
	}

	finalRoot := tree.Commit(false)
	if bytes.Equal(initialRoot, finalRoot) {
		t.Fatalf("Root should have changed after deletions")
	}

	// Load fresh tree from storage
	tree2 := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "deep",
		ShardKey:        crypto.ShardKey{},
	}

	rootNode, err := s.GetNodeByPath("vertex", "deep", crypto.ShardKey{}, []int{})
	if err != nil {
		t.Fatalf("Failed to load root: %v", err)
	}
	tree2.Root = rootNode

	// Verify keys accessible from fresh tree
	if _, err := tree2.Get(key1); err != nil {
		t.Fatalf("Key1 not found in fresh tree: %v", err)
	}
	if _, err := tree2.Get(key2); err != nil {
		t.Fatalf("Key2 not found in fresh tree: %v", err)
	}

	// Verify roots match
	freshRoot := tree2.Commit(false)
	if !bytes.Equal(finalRoot, freshRoot) {
		t.Fatalf("Root mismatch after deep nesting promotion\n"+
			"Original: %x\n"+
			"Fresh: %x", finalRoot, freshRoot)
	}

	// Compare with scratch tree
	tree3 := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "deepscratch",
		ShardKey:        crypto.ShardKey{},
	}
	tree3.Insert(nil, key1, key1, nil, big.NewInt(1))
	tree3.Insert(nil, key2, key2, nil, big.NewInt(1))

	scratchRoot := tree3.Commit(false)
	if !bytes.Equal(finalRoot, scratchRoot) {
		t.Fatalf("Root mismatch with scratch tree\n"+
			"After deletes: %x\n"+
			"From scratch: %x", finalRoot, scratchRoot)
	}

	t.Log("Deep nesting branch promotion test passed")
}

// TestBranchPromotionPathIndexCorruption specifically tests if the path index
// is corrupted when a branch is promoted during delete. This test exercises the
// scenario where a non-root branch is promoted and then accessed via path lookup.
//
// The bug hypothesis: When a branch is promoted (becomes the only child and takes
// its parent's place), the code updates childBranch.Prefix but NOT childBranch.FullPrefix.
// When InsertNode is called for a branch, it uses node.FullPrefix (not the path param)
// to store the path index. This means the path index points to the wrong location.
func TestBranchPromotionPathIndexCorruption(t *testing.T) {
	bls48581.Init()
	l, _ := zap.NewProduction()
	db := store.NewPebbleDB(l, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/pathidx"}, 0)
	s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verEncr, bls48581.NewKZGInclusionProver(l))

	// Create initial tree
	tree := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "pathidx",
		ShardKey:        crypto.ShardKey{},
	}

	// Structure designed to create a specific path index scenario:
	//
	//        Root Branch (FullPrefix=[])
	//        /            \
	//   Loner(0x10)     SubBranch(0x20) (FullPrefix=[2,0])
	//                     /       \
	//                  Key1       Key2
	//
	// After deleting Loner, SubBranch gets promoted:
	// - Its Prefix becomes merged with root's prefix
	// - But FullPrefix stays [2,0] (the bug)
	// - Path index is stored at pathFn([2,0]) not pathFn([])
	//
	// If we then close the tree and try to load by path [], we won't find it
	// (or we'll find it at the wrong location).

	// Keys designed to create the structure above.
	// Loner: starts with 0x10 (nibbles: 1, 0)
	lonerKey := make([]byte, 64)
	lonerKey[0] = 0x10
	rand.Read(lonerKey[1:])

	// SubBranch keys: start with 0x20 (nibbles: 2, 0)
	// Key1 and Key2 diverge at byte 10
	key1 := make([]byte, 64)
	key1[0] = 0x20
	for i := 1; i < 10; i++ {
		key1[i] = 0xAA // Common prefix
	}
	key1[10] = 0x11 // Divergence point
	rand.Read(key1[11:])

	key2 := make([]byte, 64)
	copy(key2, key1[:10])
	key2[10] = 0xFF // Different divergence
	rand.Read(key2[11:])

	// Insert all keys
	if err := tree.Insert(nil, lonerKey, lonerKey, nil, big.NewInt(1)); err != nil {
		t.Fatalf("Failed to insert loner: %v", err)
	}
	if err := tree.Insert(nil, key1, key1, nil, big.NewInt(1)); err != nil {
		t.Fatalf("Failed to insert key1: %v", err)
	}
	if err := tree.Insert(nil, key2, key2, nil, big.NewInt(1)); err != nil {
		t.Fatalf("Failed to insert key2: %v", err)
	}

	// Commit to persist
	_ = tree.Commit(false)

	// Log the structure before deletion
	if branch, ok := tree.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
		t.Logf("Root before delete: Prefix=%v, FullPrefix=%v", branch.Prefix, branch.FullPrefix)
		for i, child := range branch.Children {
			if child != nil {
				switch c := child.(type) {
				case *crypto.LazyVectorCommitmentBranchNode:
					t.Logf("  Child[%d] Branch: Prefix=%v, FullPrefix=%v", i, c.Prefix, c.FullPrefix)
				case *crypto.LazyVectorCommitmentLeafNode:
					t.Logf("  Child[%d] Leaf: Key=%x...", i, c.Key[:4])
				}
			}
		}
	}

	// Delete loner - triggers promotion of SubBranch to root
	if err := tree.Delete(nil, lonerKey); err != nil {
		t.Fatalf("Failed to delete loner: %v", err)
	}

	// Commit after delete to persist changes
	rootAfterDelete := tree.Commit(false)
	t.Logf("Root after delete: %x", rootAfterDelete[:16])

	// Log the structure after deletion
	if branch, ok := tree.Root.(*crypto.LazyVectorCommitmentBranchNode); ok {
		t.Logf("Root after delete: Prefix=%v, FullPrefix=%v", branch.Prefix, branch.FullPrefix)
		// THE BUG: If FullPrefix is not updated, it still shows the old path [2,0] or similar,
		// but the node is now at the root (should be [])
	}

	// Clear the in-memory tree completely
	tree.Root = nil
	tree = nil

	// Create a completely fresh tree instance (simulating restart)
	tree2 := &crypto.LazyVectorCommitmentTree{
		InclusionProver: bls48581.NewKZGInclusionProver(l),
		Store:           s,
		SetType:         "vertex",
		PhaseType:       "pathidx",
		ShardKey:        crypto.ShardKey{},
	}

	// Try to load root by path [] - this uses the path index
	t.Log("Attempting to load root from storage via path lookup...")
	rootNode, err := s.GetNodeByPath("vertex", "pathidx", crypto.ShardKey{}, []int{})
	if err != nil {
		t.Logf("ERROR: Failed to load root from storage: %v", err)
		t.Log("This confirms the FullPrefix bug - path index is at wrong location!")
		// The bug is confirmed if we can't load the root
		t.FailNow()
	}

	tree2.Root = rootNode

	// If we got here, check if the loaded root has the correct FullPrefix
	if branch, ok := rootNode.(*crypto.LazyVectorCommitmentBranchNode); ok {
		t.Logf("Loaded root: Prefix=%v, FullPrefix=%v", branch.Prefix, branch.FullPrefix)
		if len(branch.FullPrefix) != 0 {
			t.Logf("BUG DETECTED: Root should have FullPrefix=[] but has %v", branch.FullPrefix)
			// Don't fail here yet; let's see if it affects functionality
		}
	}

	// Try to get the keys from the fresh tree
	val1, err := tree2.Get(key1)
	if err != nil {
		t.Fatalf("Failed to get key1 from fresh tree: %v", err)
	}
	if !bytes.Equal(val1, key1) {
		t.Fatalf("Key1 value corrupted")
	}

	val2, err := tree2.Get(key2)
	if err != nil {
		t.Fatalf("Failed to get key2 from fresh tree: %v", err)
	}
	if !bytes.Equal(val2, key2) {
		t.Fatalf("Key2 value corrupted")
	}

	// Verify commitment matches
	freshRoot := tree2.Commit(false)
	t.Logf("Fresh tree root: %x", freshRoot[:16])

	if !bytes.Equal(rootAfterDelete, freshRoot) {
		t.Fatalf("Root commitment mismatch!\n"+
			"After delete: %x\n"+
			"Fresh load: %x", rootAfterDelete, freshRoot)
	}

	t.Log("Test passed - branch promotion path index is working correctly")
}
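
The hypothesis in the doc comment above is that the persisted path index is keyed on the promoted node's stale FullPrefix, while readers look the node up by its new path. A toy, self-contained sketch of that mismatch follows; the map-backed index and the pathKey helper are assumptions made for illustration only, not the repository's PebbleHypergraphStore or GetNodeByPath.

package main

import "fmt"

// pathKey is a hypothetical stand-in for however the store derives a key from a
// node path; any deterministic encoding exhibits the same mismatch.
func pathKey(path []int) string { return fmt.Sprint(path) }

func main() {
	index := map[string]string{}

	// A promoted branch still carrying its old FullPrefix [2 0] gets written
	// under that stale path when storage is keyed on node.FullPrefix...
	staleFullPrefix := []int{2, 0}
	index[pathKey(staleFullPrefix)] = "promoted branch"

	// ...while a lookup for the root uses path [], and misses.
	if _, ok := index[pathKey([]int{})]; !ok {
		fmt.Println("root not found at path []; entry was written at", pathKey(staleFullPrefix))
	}
}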

func TestNonLazyProveMultipleVerify(t *testing.T) {
	l, _ := zap.NewProduction()
	prover := bls48581.NewKZGInclusionProver(l)

@@ -33,12 +33,12 @@ import (
	"source.quilibrium.com/quilibrium/monorepo/config"
	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
	hgcrdt "source.quilibrium.com/quilibrium/monorepo/hypergraph"
	"source.quilibrium.com/quilibrium/monorepo/types/channel"
	internal_grpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
	"source.quilibrium.com/quilibrium/monorepo/node/store"
	"source.quilibrium.com/quilibrium/monorepo/node/tests"
	"source.quilibrium.com/quilibrium/monorepo/protobufs"
	"source.quilibrium.com/quilibrium/monorepo/types/channel"
	application "source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
	tp2p "source.quilibrium.com/quilibrium/monorepo/types/p2p"
	"source.quilibrium.com/quilibrium/monorepo/types/tries"

@@ -2594,53 +2594,53 @@ func TestMainnetBlossomsubFrameReceptionAndHypersync(t *testing.T) {

	// Create P2P config with mainnet bootstrap peers
	p2pConfig := &config.P2PConfig{
		ListenMultiaddr: "/ip4/0.0.0.0/udp/0/quic-v1", // Use random port
		BootstrapPeers: config.BootstrapPeers,
		PeerPrivKey: fmt.Sprintf("%x", peerPrivKeyBytes),
		Network: 0, // Mainnet
		D: 8,
		DLo: 6,
		DHi: 12,
		DScore: 4,
		DOut: 2,
		HistoryLength: 5,
		HistoryGossip: 3,
		DLazy: 6,
		GossipFactor: 0.25,
		GossipRetransmission: 3,
		HeartbeatInitialDelay: 100 * time.Millisecond,
		HeartbeatInterval: 1 * time.Second,
		FanoutTTL: 60 * time.Second,
		PrunePeers: 16,
		PruneBackoff: time.Minute,
		UnsubscribeBackoff: 10 * time.Second,
		Connectors: 8,
		MaxPendingConnections: 128,
		ConnectionTimeout: 30 * time.Second,
		DirectConnectTicks: 300,
		ListenMultiaddr: "/ip4/0.0.0.0/udp/0/quic-v1", // Use random port
		BootstrapPeers: config.BootstrapPeers,
		PeerPrivKey: fmt.Sprintf("%x", peerPrivKeyBytes),
		Network: 0, // Mainnet
		D: 8,
		DLo: 6,
		DHi: 12,
		DScore: 4,
		DOut: 2,
		HistoryLength: 5,
		HistoryGossip: 3,
		DLazy: 6,
		GossipFactor: 0.25,
		GossipRetransmission: 3,
		HeartbeatInitialDelay: 100 * time.Millisecond,
		HeartbeatInterval: 1 * time.Second,
		FanoutTTL: 60 * time.Second,
		PrunePeers: 16,
		PruneBackoff: time.Minute,
		UnsubscribeBackoff: 10 * time.Second,
		Connectors: 8,
		MaxPendingConnections: 128,
		ConnectionTimeout: 30 * time.Second,
		DirectConnectTicks: 300,
		DirectConnectInitialDelay: 1 * time.Second,
		OpportunisticGraftTicks: 60,
		OpportunisticGraftPeers: 2,
		GraftFloodThreshold: 10 * time.Second,
		MaxIHaveLength: 5000,
		MaxIHaveMessages: 10,
		MaxIDontWantMessages: 10,
		IWantFollowupTime: 3 * time.Second,
		OpportunisticGraftTicks: 60,
		OpportunisticGraftPeers: 2,
		GraftFloodThreshold: 10 * time.Second,
		MaxIHaveLength: 5000,
		MaxIHaveMessages: 10,
		MaxIDontWantMessages: 10,
		IWantFollowupTime: 3 * time.Second,
		IDontWantMessageThreshold: 10000,
		IDontWantMessageTTL: 3,
		MinBootstrapPeers: 1,
		BootstrapParallelism: 4,
		DiscoveryParallelism: 4,
		DiscoveryPeerLookupLimit: 100,
		PingTimeout: 30 * time.Second,
		PingPeriod: time.Minute,
		PingAttempts: 3,
		LowWatermarkConnections: -1,
		HighWatermarkConnections: -1,
		SubscriptionQueueSize: 128,
		ValidateQueueSize: 128,
		ValidateWorkers: 4,
		PeerOutboundQueueSize: 128,
		MinBootstrapPeers: 1,
		BootstrapParallelism: 4,
		DiscoveryParallelism: 4,
		DiscoveryPeerLookupLimit: 100,
		PingTimeout: 30 * time.Second,
		PingPeriod: time.Minute,
		PingAttempts: 3,
		LowWatermarkConnections: -1,
		HighWatermarkConnections: -1,
		SubscriptionQueueSize: 128,
		ValidateQueueSize: 128,
		ValidateWorkers: 4,
		PeerOutboundQueueSize: 128,
	}

	engineConfig := &config.EngineConfig{}

@@ -3120,7 +3120,7 @@ waitLoop:
			continue
		}

		_, err = clientHG.SyncFrom(stream, proverShardKey, phase, expectedRoot)
		_, err = clientHG.SyncFrom(stream, proverShardKey, phase, nil)
		if err != nil {
			t.Logf("SyncFrom error for phase %v: %v", phase, err)
		}

@@ -122,6 +122,7 @@ const (
	VERTEX_TOMBSTONE = 0xF1
	HYPERGRAPH_COVERED_PREFIX = 0xFA
	HYPERGRAPH_COMPLETE = 0xFB
	HYPERGRAPH_GLOBAL_PROVER_RECALC_DONE = 0xF9
	VERTEX_ADDS_TREE_ROOT = 0xFC
	VERTEX_REMOVES_TREE_ROOT = 0xFD
	HYPEREDGE_ADDS_TREE_ROOT = 0xFE

@@ -8,6 +8,7 @@ import (
	"fmt"
	"io"
	"os"
	"slices"
	"strings"

	pebblev1 "github.com/cockroachdb/pebble"
@@ -15,7 +16,9 @@ import (
	"github.com/cockroachdb/pebble/v2/vfs"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	"source.quilibrium.com/quilibrium/monorepo/bls48581"
	"source.quilibrium.com/quilibrium/monorepo/config"
	"source.quilibrium.com/quilibrium/monorepo/types/hypergraph"
	"source.quilibrium.com/quilibrium/monorepo/types/store"
	"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
@@ -76,6 +79,13 @@ var pebbleMigrations = []func(*pebble.Batch) error{
	migration_2_1_0_187,
	migration_2_1_0_188,
	migration_2_1_0_189,
	migration_2_1_0_1810,
	migration_2_1_0_1811,
	migration_2_1_0_1812,
	migration_2_1_0_1813,
	migration_2_1_0_1814,
	migration_2_1_0_1815,
	migration_2_1_0_1816,
}

func NewPebbleDB(
@@ -818,9 +828,264 @@ func migration_2_1_0_188(b *pebble.Batch) error {
}

func migration_2_1_0_189(b *pebble.Batch) error {
	return migration_2_1_0_18(b)
	return nil
}

func migration_2_1_0_1810(b *pebble.Batch) error {
	return migration_2_1_0_189(b)
}

func migration_2_1_0_1811(b *pebble.Batch) error {
	return migration_2_1_0_189(b)
}

func migration_2_1_0_1812(b *pebble.Batch) error {
	return migration_2_1_0_189(b)
}

func migration_2_1_0_1813(b *pebble.Batch) error {
	return migration_2_1_0_189(b)
}

func migration_2_1_0_1814(b *pebble.Batch) error {
	return migration_2_1_0_189(b)
}

func migration_2_1_0_1815(b *pebble.Batch) error {
	return migration_2_1_0_189(b)
}

// migration_2_1_0_1816 recalculates commitments for the global prover trees
// to fix potential corruption from earlier versions of sync.
func migration_2_1_0_1816(b *pebble.Batch) error {
	// Check if already done
	doneKey := []byte{HYPERGRAPH_SHARD, HYPERGRAPH_GLOBAL_PROVER_RECALC_DONE}
	if _, closer, err := b.Get(doneKey); err == nil {
		closer.Close()
		return nil // Already done
	}

	// Global prover shard key: L1={0,0,0}, L2=0xff*32
	globalShardKey := tries.ShardKey{
		L1: [3]byte{},
		L2: [32]byte{
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		},
	}

	// Initialize prover (logger can be nil for migrations)
	prover := bls48581.NewKZGInclusionProver(nil)

	// Create a batch-backed KVDB for the hypergraph store
	batchDB := &pebbleBatchDB{b: b}

	// Create hypergraph store using the batch
	hgStore := NewPebbleHypergraphStore(nil, batchDB, nil, nil, prover)

	// Load and recalculate each tree for the global prover shard
	treeTypes := []struct {
		setType   string
		phaseType string
		rootKey   func(tries.ShardKey) []byte
	}{
		{
			string(hypergraph.VertexAtomType),
			string(hypergraph.AddsPhaseType),
			hypergraphVertexAddsTreeRootKey,
		},
		{
			string(hypergraph.VertexAtomType),
			string(hypergraph.RemovesPhaseType),
			hypergraphVertexRemovesTreeRootKey,
		},
		{
			string(hypergraph.HyperedgeAtomType),
			string(hypergraph.AddsPhaseType),
			hypergraphHyperedgeAddsTreeRootKey,
		},
		{
			string(hypergraph.HyperedgeAtomType),
			string(hypergraph.RemovesPhaseType),
			hypergraphHyperedgeRemovesTreeRootKey,
		},
	}

	for _, tt := range treeTypes {
		rootData, closer, err := b.Get(tt.rootKey(globalShardKey))
		if err != nil {
			// No root for this tree, skip
			continue
		}
		data := slices.Clone(rootData)
		closer.Close()

		if len(data) == 0 {
			continue
		}

		var node tries.LazyVectorCommitmentNode
		switch data[0] {
		case tries.TypeLeaf:
			node, err = tries.DeserializeLeafNode(hgStore, bytes.NewReader(data[1:]))
		case tries.TypeBranch:
			pathLength := binary.BigEndian.Uint32(data[1:5])
			node, err = tries.DeserializeBranchNode(
				hgStore,
				bytes.NewReader(data[5+(pathLength*4):]),
				false,
			)
			if err != nil {
				return errors.Wrapf(
					err,
					"deserialize %s %s branch",
					tt.setType,
					tt.phaseType,
				)
			}

			fullPrefix := []int{}
			for i := range pathLength {
				fullPrefix = append(
					fullPrefix,
					int(binary.BigEndian.Uint32(data[5+(i*4):5+((i+1)*4)])),
				)
			}
			branch := node.(*tries.LazyVectorCommitmentBranchNode)
			branch.FullPrefix = fullPrefix
		default:
			continue // Unknown type, skip
		}

		if err != nil {
			return errors.Wrapf(
				err,
				"deserialize %s %s root",
				tt.setType,
				tt.phaseType,
			)
		}

		// Create tree and force recalculation
		tree := &tries.LazyVectorCommitmentTree{
			Root:            node,
			SetType:         tt.setType,
			PhaseType:       tt.phaseType,
			ShardKey:        globalShardKey,
			Store:           hgStore,
			CoveredPrefix:   nil,
			InclusionProver: prover,
		}

		// Force full recalculation of commitments
		tree.Commit(true)
	}

	// Mark migration as done
	if err := b.Set(doneKey, []byte{0x01}, &pebble.WriteOptions{}); err != nil {
		return errors.Wrap(err, "mark global prover recalc done")
	}

	return nil
}

// pebbleBatchDB wraps a *pebble.Batch to implement store.KVDB for use in migrations
type pebbleBatchDB struct {
	b *pebble.Batch
}

func (p *pebbleBatchDB) Get(key []byte) ([]byte, io.Closer, error) {
	return p.b.Get(key)
}

func (p *pebbleBatchDB) Set(key, value []byte) error {
	return p.b.Set(key, value, &pebble.WriteOptions{})
}

func (p *pebbleBatchDB) Delete(key []byte) error {
	return p.b.Delete(key, &pebble.WriteOptions{})
}

func (p *pebbleBatchDB) NewBatch(indexed bool) store.Transaction {
	// Migrations don't need nested transactions; return a wrapper around the same
	// batch
	return &pebbleBatchTransaction{b: p.b}
}

func (p *pebbleBatchDB) NewIter(lowerBound []byte, upperBound []byte) (
	store.Iterator,
	error,
) {
	return p.b.NewIter(&pebble.IterOptions{
		LowerBound: lowerBound,
		UpperBound: upperBound,
	})
}

func (p *pebbleBatchDB) Compact(start, end []byte, parallelize bool) error {
	return nil // No-op for batch
}

func (p *pebbleBatchDB) Close() error {
	return nil // Don't close the batch here
}

func (p *pebbleBatchDB) DeleteRange(start, end []byte) error {
	return p.b.DeleteRange(start, end, &pebble.WriteOptions{})
}

func (p *pebbleBatchDB) CompactAll() error {
	return nil // No-op for batch
}

var _ store.KVDB = (*pebbleBatchDB)(nil)

// pebbleBatchTransaction wraps a *pebble.Batch to implement store.Transaction
type pebbleBatchTransaction struct {
	b *pebble.Batch
}

func (t *pebbleBatchTransaction) Get(key []byte) ([]byte, io.Closer, error) {
	return t.b.Get(key)
}

func (t *pebbleBatchTransaction) Set(key []byte, value []byte) error {
	return t.b.Set(key, value, &pebble.WriteOptions{})
}

func (t *pebbleBatchTransaction) Commit() error {
	return nil // Don't commit; the migration batch handles this
}

func (t *pebbleBatchTransaction) Delete(key []byte) error {
	return t.b.Delete(key, &pebble.WriteOptions{})
}

func (t *pebbleBatchTransaction) Abort() error {
	return nil // Can't abort part of a batch
}

func (t *pebbleBatchTransaction) NewIter(lowerBound []byte, upperBound []byte) (
	store.Iterator,
	error,
) {
	return t.b.NewIter(&pebble.IterOptions{
		LowerBound: lowerBound,
		UpperBound: upperBound,
	})
}

func (t *pebbleBatchTransaction) DeleteRange(
	lowerBound []byte,
	upperBound []byte,
) error {
	return t.b.DeleteRange(lowerBound, upperBound, &pebble.WriteOptions{})
}

var _ store.Transaction = (*pebbleBatchTransaction)(nil)

type pebbleSnapshotDB struct {
	snap *pebble.Snapshot
}