mirror of
https://github.com/QuilibriumNetwork/ceremonyclient.git
synced 2026-02-21 18:37:26 +08:00
1069 lines
34 KiB
Go
1069 lines
34 KiB
Go
//go:build integrationtest
|
|
// +build integrationtest
|
|
|
|
package crypto
|
|
|
|
import (
|
|
"bytes"
|
|
"crypto/rand"
|
|
"math/big"
|
|
mrand "math/rand"
|
|
"testing"
|
|
|
|
"go.uber.org/zap"
|
|
"source.quilibrium.com/quilibrium/monorepo/config"
|
|
"source.quilibrium.com/quilibrium/monorepo/node/store"
|
|
"source.quilibrium.com/quilibrium/monorepo/types/mocks"
|
|
"source.quilibrium.com/quilibrium/monorepo/types/tries"
|
|
)
|
|
|
|
// verencr is a shared mock verifiable encryptor injected into every
// hypergraph store constructed by the tests in this file.
var verencr = &mocks.MockVerifiableEncryptor{}
|
|
|
|
func TestLazyVectorCommitmentTreesNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true}, db, l, verencr, nil)
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Test single insert
|
|
err := tree.Insert(nil, []byte("key1"), []byte("value1"), nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert: %v", err)
|
|
}
|
|
|
|
// Test duplicate key
|
|
err = tree.Insert(nil, []byte("key1"), []byte("value2"), nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to update existing key: %v", err)
|
|
}
|
|
|
|
value, err := tree.Get([]byte("key1"))
|
|
if err != nil {
|
|
t.Errorf("Failed to get value: %v", err)
|
|
}
|
|
if !bytes.Equal(value, []byte("value2")) {
|
|
t.Errorf("Expected value2, got %s", string(value))
|
|
}
|
|
|
|
// Test empty key
|
|
err = tree.Insert(nil, []byte{}, []byte("value"), nil, big.NewInt(1))
|
|
if err == nil {
|
|
t.Error("Expected error for empty key, got none")
|
|
}
|
|
|
|
l, _ = zap.NewProduction()
|
|
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Test get on empty tree
|
|
_, err = tree.Get([]byte("nonexistent"))
|
|
if err == nil {
|
|
t.Error("Expected error for nonexistent key, got none")
|
|
}
|
|
|
|
// Insert and get
|
|
tree.Insert(nil, []byte("key1"), []byte("value1"), nil, big.NewInt(1))
|
|
value, err = tree.Get([]byte("key1"))
|
|
if err != nil {
|
|
t.Errorf("Failed to get value: %v", err)
|
|
}
|
|
if !bytes.Equal(value, []byte("value1")) {
|
|
t.Errorf("Expected value1, got %s", string(value))
|
|
}
|
|
|
|
// Test empty key
|
|
_, err = tree.Get([]byte{})
|
|
if err == nil {
|
|
t.Error("Expected error for empty key, got none")
|
|
}
|
|
|
|
l, _ = zap.NewProduction()
|
|
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Test delete on empty tree
|
|
err = tree.Delete(nil, []byte("nonexistent"))
|
|
if err != nil {
|
|
t.Errorf("Delete on empty tree should not return error: %v", err)
|
|
}
|
|
|
|
// Insert and delete
|
|
tree.Insert(nil, []byte("key1"), []byte("value1"), nil, big.NewInt(1))
|
|
err = tree.Delete(nil, []byte("key1"))
|
|
if err != nil {
|
|
t.Errorf("Failed to delete: %v", err)
|
|
}
|
|
|
|
// Verify deletion
|
|
v, err := tree.Get([]byte("key1"))
|
|
if err == nil {
|
|
t.Errorf("Expected error for deleted key, got none, %v", v)
|
|
}
|
|
|
|
// Test empty key
|
|
err = tree.Delete(nil, []byte{})
|
|
if err == nil {
|
|
t.Error("Expected error for empty key, got none")
|
|
}
|
|
|
|
l, _ = zap.NewProduction()
|
|
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Insert keys that share common prefix
|
|
keys := []string{
|
|
"key1",
|
|
"key2",
|
|
"key3",
|
|
"completely_different",
|
|
}
|
|
|
|
for i, key := range keys {
|
|
err := tree.Insert(nil, []byte(key), []byte("value"+string(rune('1'+i))), nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert key %s: %v", key, err)
|
|
}
|
|
}
|
|
|
|
// Verify all values
|
|
for i, key := range keys {
|
|
value, err := tree.Get([]byte(key))
|
|
if err != nil {
|
|
t.Errorf("Failed to get key %s: %v", key, err)
|
|
}
|
|
expected := []byte("value" + string(rune('1'+i)))
|
|
if !bytes.Equal(value, expected) {
|
|
t.Errorf("Expected %s, got %s", string(expected), string(value))
|
|
}
|
|
}
|
|
|
|
// Delete middle key
|
|
err = tree.Delete(nil, []byte("key2"))
|
|
if err != nil {
|
|
t.Errorf("Failed to delete key2: %v", err)
|
|
}
|
|
|
|
// Verify key2 is gone but others remain
|
|
_, err = tree.Get([]byte("key2"))
|
|
if err == nil {
|
|
t.Error("Expected error for deleted key2, got none")
|
|
}
|
|
|
|
// Check remaining keys
|
|
remainingKeys := []string{"key1", "key3", "completely_different"}
|
|
remainingValues := []string{"value1", "value3", "value4"}
|
|
for i, key := range remainingKeys {
|
|
value, err := tree.Get([]byte(key))
|
|
if err != nil {
|
|
t.Errorf("Failed to get key %s after deletion: %v", key, err)
|
|
}
|
|
expected := []byte(remainingValues[i])
|
|
if !bytes.Equal(value, expected) {
|
|
t.Errorf("Expected %s, got %s", string(expected), string(value))
|
|
}
|
|
}
|
|
|
|
l, _ = zap.NewProduction()
|
|
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Empty tree should be empty
|
|
emptyRoot := tree.Root
|
|
if emptyRoot != nil {
|
|
t.Errorf("Expected empty root")
|
|
}
|
|
|
|
// Root should change after delete
|
|
tree.Delete(nil, []byte("key1"))
|
|
|
|
l, _ = zap.NewProduction()
|
|
db = store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s = store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
cmptree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
addresses := [][]byte{}
|
|
|
|
for i := 0; i < 10000; i++ {
|
|
d := make([]byte, 32)
|
|
rand.Read(d)
|
|
addresses = append(addresses, append(append([]byte{}, make([]byte, 32)...), d...))
|
|
}
|
|
|
|
kept := [][]byte{}
|
|
for i := 0; i < 5000; i++ {
|
|
kept = append(kept, addresses[i])
|
|
}
|
|
|
|
newAdditions := [][]byte{}
|
|
for i := 0; i < 5000; i++ {
|
|
d := make([]byte, 32)
|
|
rand.Read(d)
|
|
newAdditions = append(newAdditions, append(append([]byte{}, make([]byte, 32)...), d...))
|
|
kept = append(kept, append(append([]byte{}, make([]byte, 32)...), d...))
|
|
}
|
|
|
|
// Insert 10000 items
|
|
for i := 0; i < 10000; i++ {
|
|
key := addresses[i]
|
|
value := addresses[i]
|
|
err := tree.Insert(nil, key, value, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert item %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
if tree.GetSize().Cmp(big.NewInt(10000)) != 0 {
|
|
t.Errorf("invalid tree size: %s", tree.GetSize().String())
|
|
}
|
|
|
|
// Insert 10000 items in reverse
|
|
for i := 9999; i >= 0; i-- {
|
|
key := addresses[i]
|
|
value := addresses[i]
|
|
err := cmptree.Insert(nil, key, value, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert item %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// Verify all items
|
|
for i := 0; i < 10000; i++ {
|
|
key := addresses[i]
|
|
expected := addresses[i]
|
|
value, err := tree.Get(key)
|
|
if err != nil {
|
|
t.Errorf("Failed to get item %d: %v", i, err)
|
|
}
|
|
cmpvalue, err := cmptree.Get(key)
|
|
if err != nil {
|
|
t.Errorf("Failed to get item %d: %v", i, err)
|
|
}
|
|
if !bytes.Equal(value, expected) {
|
|
t.Errorf("Item %d: expected %x, got %x", i, string(expected), string(value))
|
|
}
|
|
if !bytes.Equal(value, cmpvalue) {
|
|
t.Errorf("Item %d: expected %x, got %x", i, string(value), string(cmpvalue))
|
|
}
|
|
}
|
|
|
|
// delete keys
|
|
for i := 5000; i < 10000; i++ {
|
|
key := addresses[i]
|
|
tree.Delete(nil, key)
|
|
}
|
|
|
|
if tree.GetSize().Cmp(big.NewInt(5000)) != 0 {
|
|
t.Errorf("invalid tree size: %s", tree.GetSize().String())
|
|
}
|
|
|
|
// add new
|
|
for i := 0; i < 5000; i++ {
|
|
tree.Insert(nil, newAdditions[i], newAdditions[i], nil, big.NewInt(1))
|
|
}
|
|
|
|
if tree.GetSize().Cmp(big.NewInt(10000)) != 0 {
|
|
t.Errorf("invalid tree size: %s", tree.GetSize().String())
|
|
}
|
|
|
|
cmptree = &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
for i := 0; i < 10000; i++ {
|
|
cmptree.Insert(nil, kept[i], kept[i], nil, big.NewInt(1))
|
|
}
|
|
// Verify all items
|
|
for i := 0; i < 10000; i++ {
|
|
key := kept[i]
|
|
expected := kept[i]
|
|
value, err := tree.Get(key)
|
|
if err != nil {
|
|
t.Errorf("Failed to get item %d: %v", i, err)
|
|
}
|
|
cmpvalue, err := cmptree.Get(key)
|
|
if err != nil {
|
|
t.Errorf("Failed to get item %d: %v", i, err)
|
|
}
|
|
if !bytes.Equal(value, expected) {
|
|
t.Errorf("Item %d: expected %x, got %x", i, string(expected), string(value))
|
|
}
|
|
if !bytes.Equal(expected, cmpvalue) {
|
|
t.Errorf("Item %d: expected %x, got %x", i, string(value), string(cmpvalue))
|
|
}
|
|
}
|
|
|
|
leaves, longestBranch := tree.GetMetadata()
|
|
|
|
if leaves != 10000 {
|
|
t.Errorf("incorrect leaf count, %d, %d,", 10000, leaves)
|
|
}
|
|
|
|
// Statistical assumption, can be flaky
|
|
if longestBranch < 4 || longestBranch > 5 {
|
|
tries.DebugNode(tree.SetType, tree.PhaseType, tree.ShardKey, tree.Root, 0, "")
|
|
t.Errorf("unlikely longest branch count, %d, %d, review this tree", 4, longestBranch)
|
|
}
|
|
}
|
|
|
|
// TestTreeLeafReaddition tests that re-adding the exact same leaf does not
|
|
// increase the Size metadata
|
|
func TestTreeLeafReadditionNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Generate 1000 random 64-byte keys and corresponding values
|
|
numKeys := 1000
|
|
keys := make([][]byte, numKeys)
|
|
values := make([][]byte, numKeys)
|
|
|
|
for i := 0; i < numKeys; i++ {
|
|
// Generate random 64-byte key
|
|
key := make([]byte, 64)
|
|
rand.Read(key)
|
|
keys[i] = key
|
|
|
|
// Generate random value
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
values[i] = val
|
|
|
|
// Insert into tree
|
|
err := tree.Insert(nil, key, val, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert item %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// Get original size metadata
|
|
originalSize := tree.GetSize()
|
|
expectedSize := big.NewInt(int64(numKeys))
|
|
if originalSize.Cmp(expectedSize) != 0 {
|
|
t.Errorf("Expected tree size to be %s, got %s", expectedSize.String(), originalSize.String())
|
|
}
|
|
|
|
// Choose a random key to test with
|
|
testIndex := mrand.Intn(numKeys)
|
|
testKey := keys[testIndex]
|
|
testValue := values[testIndex]
|
|
|
|
// Re-add the exact same leaf
|
|
err := tree.Insert(nil, testKey, testValue, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to re-insert the same leaf: %v", err)
|
|
}
|
|
|
|
// Check size hasn't changed
|
|
newSize := tree.GetSize()
|
|
if newSize.Cmp(originalSize) != 0 {
|
|
t.Errorf("Expected size to remain %s after re-adding same leaf, got %s", originalSize.String(), newSize.String())
|
|
}
|
|
}
|
|
|
|
// TestTreeRemoveReaddLeaf tests that removing a leaf and re-adding it
|
|
// decreases and increases the size metadata appropriately
|
|
func TestTreeRemoveReaddLeafNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Generate 1000 random 64-byte keys and corresponding values
|
|
numKeys := 1000
|
|
keys := make([][]byte, numKeys)
|
|
values := make([][]byte, numKeys)
|
|
|
|
for i := 0; i < numKeys; i++ {
|
|
// Generate random 64-byte key
|
|
key := make([]byte, 64)
|
|
rand.Read(key)
|
|
keys[i] = key
|
|
|
|
// Generate random value
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
values[i] = val
|
|
|
|
// Insert into tree
|
|
err := tree.Insert(nil, key, val, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert item %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// Get original size metadata
|
|
originalSize := tree.GetSize()
|
|
expectedSize := big.NewInt(int64(numKeys))
|
|
if originalSize.Cmp(expectedSize) != 0 {
|
|
t.Errorf("Expected tree size to be %s, got %s", expectedSize.String(), originalSize.String())
|
|
}
|
|
|
|
// Choose a random key to test with
|
|
testIndex := mrand.Intn(numKeys)
|
|
testKey := keys[testIndex]
|
|
testValue := values[testIndex]
|
|
|
|
// Remove the leaf
|
|
err := tree.Delete(nil, testKey)
|
|
if err != nil {
|
|
t.Errorf("Failed to delete leaf: %v", err)
|
|
}
|
|
|
|
// Check size has decreased
|
|
reducedSize := tree.GetSize()
|
|
expectedReducedSize := big.NewInt(int64(numKeys - 1))
|
|
if reducedSize.Cmp(expectedReducedSize) != 0 {
|
|
t.Errorf("Expected size to be %s after removing leaf, got %s", expectedReducedSize.String(), reducedSize.String())
|
|
}
|
|
|
|
// Re-add the same leaf
|
|
err = tree.Insert(nil, testKey, testValue, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to re-add leaf: %v", err)
|
|
}
|
|
|
|
// Check size has increased back to original
|
|
restoredSize := tree.GetSize()
|
|
if restoredSize.Cmp(originalSize) != 0 {
|
|
t.Errorf("Expected size to be restored to %s after re-adding leaf, got %s", originalSize.String(), restoredSize.String())
|
|
}
|
|
}
|
|
|
|
// TestTreeLongestBranch tests that the longest branch metadata value is always
|
|
// correct.
|
|
func TestTreeLongestBranchNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}
|
|
|
|
// Test with an empty tree
|
|
leaves, longestBranch := tree.GetMetadata()
|
|
if leaves != 0 {
|
|
t.Errorf("Expected 0 leaves in empty tree, got %d", leaves)
|
|
}
|
|
if longestBranch != 0 {
|
|
t.Errorf("Expected longest branch of 0 in empty tree, got %d", longestBranch)
|
|
}
|
|
|
|
// Insert one item with a random 64-byte key
|
|
key1 := make([]byte, 64)
|
|
rand.Read(key1)
|
|
value1 := make([]byte, 32)
|
|
rand.Read(value1)
|
|
|
|
tree.Insert(nil, key1, value1, nil, big.NewInt(1))
|
|
|
|
leaves, longestBranch = tree.GetMetadata()
|
|
if leaves != 1 {
|
|
t.Errorf("Expected 1 leaf after single insert, got %d", leaves)
|
|
}
|
|
if longestBranch != 0 {
|
|
t.Errorf("Expected longest branch of 0 with single leaf, got %d", longestBranch)
|
|
}
|
|
|
|
// Generate batch 1: Add 999 more random keys (total 1000)
|
|
batch1Size := 999
|
|
batch1Keys := make([][]byte, batch1Size)
|
|
|
|
for i := 0; i < batch1Size; i++ {
|
|
key := make([]byte, 64)
|
|
rand.Read(key)
|
|
batch1Keys[i] = key
|
|
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
|
|
err := tree.Insert(nil, key, val, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert batch 1 item %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// With 1000 random keys, we should have created some branches
|
|
leaves, longestBranch = tree.GetMetadata()
|
|
expectedLeaves := 1000
|
|
if leaves != expectedLeaves {
|
|
t.Errorf("Expected %d leaves after batch 1, got %d", expectedLeaves, leaves)
|
|
}
|
|
|
|
// Due to random distribution of keys, we expect a minimum branch depth
|
|
// For 1000 64-byte keys, we expect a minimum branch depth of at least 3
|
|
expectedMinLongestBranch := 3
|
|
if longestBranch < expectedMinLongestBranch {
|
|
t.Errorf("Expected longest branch of at least %d with 1000 random keys, got %d",
|
|
expectedMinLongestBranch, longestBranch)
|
|
}
|
|
|
|
expectedMaxLongestBranch := 4
|
|
if longestBranch > expectedMaxLongestBranch {
|
|
t.Errorf("Expected longest branch to be at most %d with 1000 random keys, got %d",
|
|
expectedMaxLongestBranch, longestBranch)
|
|
}
|
|
|
|
t.Logf("Tree with 1000 random keys has longest branch of %d", longestBranch)
|
|
|
|
// Generate batch 2: Insert 1000 more items with controlled prefixes to create
|
|
// deeper branches. We'll create keys with the same prefix bytes to force
|
|
// branch creation
|
|
batch2Size := 1000
|
|
batch2Keys := make([][]byte, batch2Size)
|
|
|
|
// Create a common prefix for first 8 bytes (forcing branch at this level)
|
|
commonPrefix := make([]byte, 8)
|
|
rand.Read(commonPrefix)
|
|
|
|
for i := 0; i < batch2Size; i++ {
|
|
key := make([]byte, 64)
|
|
// First 8 bytes are the same for all keys
|
|
copy(key[:8], commonPrefix)
|
|
// Next bytes are random but within controlled ranges to create subgroups
|
|
// This creates deeper branch structures
|
|
key[8] = byte(i % 4) // Creates 4 major groups
|
|
key[9] = byte(i % 16) // Creates 16 subgroups
|
|
// Rest is random
|
|
rand.Read(key[10:])
|
|
|
|
batch2Keys[i] = key
|
|
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
|
|
err := tree.Insert(nil, key, val, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Errorf("Failed to insert batch 2 item %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// With controlled prefixes, branches should be deeper
|
|
leaves, newLongestBranch := tree.GetMetadata()
|
|
expectedLeaves = 2000 // 1000 from batch 1 + 1000 from batch 2
|
|
if leaves != expectedLeaves {
|
|
t.Errorf("Expected %d leaves after batch 2, got %d", expectedLeaves, leaves)
|
|
}
|
|
|
|
// With our specific prefix design, branches should be deeper
|
|
// The depth should have increased from batch 1
|
|
if newLongestBranch <= longestBranch {
|
|
t.Errorf("Expected longest branch to increase after adding batch 2 with controlled prefixes, "+
|
|
"previous: %d, current: %d", longestBranch, newLongestBranch)
|
|
}
|
|
|
|
t.Logf("Tree with 2000 keys including controlled prefixes has longest branch of %d", newLongestBranch)
|
|
|
|
// Delete all batch 2 keys
|
|
for _, key := range batch2Keys {
|
|
err := tree.Delete(nil, key)
|
|
if err != nil {
|
|
t.Errorf("Failed to delete structured key: %v", err)
|
|
}
|
|
}
|
|
|
|
// After deleting all batch 2 keys, we should be back to the original branch depth
|
|
leaves, finalLongestBranch := tree.GetMetadata()
|
|
expectedLeaves = 1000 // Back to just batch 1
|
|
if leaves != expectedLeaves {
|
|
t.Errorf("Expected %d leaves after deleting batch 2, got %d", expectedLeaves, leaves)
|
|
}
|
|
|
|
// Longest branch should have decreased since we removed the structured keys
|
|
if finalLongestBranch > newLongestBranch {
|
|
t.Errorf("Expected longest branch to decrease after deleting structured keys, "+
|
|
"previous: %d, current: %d", newLongestBranch, finalLongestBranch)
|
|
}
|
|
|
|
// Must be the same
|
|
expectedDiffFromOriginal := 0
|
|
diff := int(finalLongestBranch) - int(longestBranch)
|
|
if diff < 0 {
|
|
diff = -diff // Absolute value
|
|
}
|
|
if diff != expectedDiffFromOriginal {
|
|
t.Logf("Note: Longest branch after deleting batch 2 (%d) differs significantly from "+
|
|
"original batch 1 longest branch (%d)", finalLongestBranch, longestBranch)
|
|
}
|
|
}
|
|
|
|
// TestTreeNoStaleNodesAfterDelete tests that deleting nodes does not leave
|
|
// orphaned/stale tree nodes in the database. This specifically tests the case
|
|
// where branch merging occurs during deletion.
|
|
func TestTreeNoStaleNodesAfterDeleteNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
shardKey := tries.ShardKey{}
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: shardKey}
|
|
|
|
// Create keys that will force branch creation and then merging on deletion.
|
|
// We want keys that share a prefix so that:
|
|
// 1. Inserting them creates branch nodes
|
|
// 2. Deleting some of them causes branch merging (childCount == 1 case)
|
|
|
|
// Create 3 keys with same first 8 bytes, different after
|
|
commonPrefix := make([]byte, 8)
|
|
rand.Read(commonPrefix)
|
|
|
|
keys := make([][]byte, 3)
|
|
values := make([][]byte, 3)
|
|
|
|
for i := 0; i < 3; i++ {
|
|
key := make([]byte, 64)
|
|
copy(key[:8], commonPrefix)
|
|
// Vary byte 8 to create branching
|
|
key[8] = byte(i * 64) // 0x00, 0x40, 0x80
|
|
rand.Read(key[9:])
|
|
keys[i] = key
|
|
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
values[i] = val
|
|
|
|
err := tree.Insert(nil, key, val, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Fatalf("Failed to insert key %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// Verify all 3 keys exist
|
|
for i, key := range keys {
|
|
_, err := tree.Get(key)
|
|
if err != nil {
|
|
t.Fatalf("Key %d not found after insert: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// Count tree nodes before deletion
|
|
nodesBefore := countTreeNodes(t, s, shardKey)
|
|
t.Logf("Tree nodes before deletion: %d", nodesBefore)
|
|
|
|
// Delete one key - this should trigger branch merging
|
|
err := tree.Delete(nil, keys[1])
|
|
if err != nil {
|
|
t.Fatalf("Failed to delete key 1: %v", err)
|
|
}
|
|
|
|
// Verify key 1 is gone
|
|
_, err = tree.Get(keys[1])
|
|
if err == nil {
|
|
t.Fatal("Key 1 should not exist after deletion")
|
|
}
|
|
|
|
// Verify keys 0 and 2 still exist
|
|
for _, i := range []int{0, 2} {
|
|
_, err := tree.Get(keys[i])
|
|
if err != nil {
|
|
t.Fatalf("Key %d not found after deleting key 1: %v", i, err)
|
|
}
|
|
}
|
|
|
|
// Count tree nodes after deletion
|
|
nodesAfter := countTreeNodes(t, s, shardKey)
|
|
t.Logf("Tree nodes after deletion: %d", nodesAfter)
|
|
|
|
// Now verify that all stored nodes are actually reachable from the root.
|
|
// This is the critical check - any unreachable nodes are "stale".
|
|
reachableNodes := countReachableNodes(t, tree)
|
|
t.Logf("Reachable nodes from root: %d", reachableNodes)
|
|
|
|
if nodesAfter != reachableNodes {
|
|
t.Errorf("STALE NODES DETECTED: stored=%d, reachable=%d, stale=%d",
|
|
nodesAfter, reachableNodes, nodesAfter-reachableNodes)
|
|
}
|
|
|
|
// More aggressive test: delete all but one key
|
|
err = tree.Delete(nil, keys[2])
|
|
if err != nil {
|
|
t.Fatalf("Failed to delete key 2: %v", err)
|
|
}
|
|
|
|
nodesAfterSecondDelete := countTreeNodes(t, s, shardKey)
|
|
reachableAfterSecondDelete := countReachableNodes(t, tree)
|
|
t.Logf("After second delete: stored=%d, reachable=%d", nodesAfterSecondDelete, reachableAfterSecondDelete)
|
|
|
|
if nodesAfterSecondDelete != reachableAfterSecondDelete {
|
|
t.Errorf("STALE NODES DETECTED after second delete: stored=%d, reachable=%d, stale=%d",
|
|
nodesAfterSecondDelete, reachableAfterSecondDelete, nodesAfterSecondDelete-reachableAfterSecondDelete)
|
|
}
|
|
|
|
// Delete the last key - tree should be empty
|
|
err = tree.Delete(nil, keys[0])
|
|
if err != nil {
|
|
t.Fatalf("Failed to delete key 0: %v", err)
|
|
}
|
|
|
|
nodesAfterAllDeleted := countTreeNodes(t, s, shardKey)
|
|
t.Logf("After all deleted: stored=%d", nodesAfterAllDeleted)
|
|
|
|
// There should be no tree nodes left (except possibly the root marker)
|
|
if nodesAfterAllDeleted > 1 {
|
|
t.Errorf("STALE NODES DETECTED after all deleted: stored=%d (expected 0 or 1)",
|
|
nodesAfterAllDeleted)
|
|
}
|
|
}
|
|
|
|
// TestTreeNoStaleNodesAfterBranchMerge specifically tests the case where
|
|
// deleting a node causes a branch to merge with its only remaining child branch.
|
|
// This tests the FullPrefix update bug hypothesis.
|
|
func TestTreeNoStaleNodesAfterBranchMergeNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
shardKey := tries.ShardKey{}
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: shardKey}
|
|
|
|
// To trigger a branch-to-branch merge during deletion, we need:
|
|
// 1. A parent branch with exactly 2 children
|
|
// 2. One child is a leaf (to be deleted)
|
|
// 3. The other child is a branch node
|
|
//
|
|
// Structure we want to create:
|
|
// Root (branch)
|
|
// ├── Child A (leaf) - will be deleted
|
|
// └── Child B (branch)
|
|
// ├── Grandchild 1 (leaf)
|
|
// └── Grandchild 2 (leaf)
|
|
//
|
|
// After deleting Child A, Root should merge with Child B.
|
|
|
|
// Keys with controlled nibbles to create specific tree structure
|
|
// Nibble = 6 bits, so first nibble is bits 0-5 of first byte
|
|
|
|
// Key A: first nibble = 0 (byte[0] bits 7-2 = 000000)
|
|
keyA := make([]byte, 64)
|
|
keyA[0] = 0x00 // First nibble = 0
|
|
rand.Read(keyA[1:])
|
|
|
|
// Keys for branch B children: first nibble = 1 (byte[0] bits 7-2 = 000001)
|
|
// They share first nibble (1) but differ at second nibble
|
|
keyB1 := make([]byte, 64)
|
|
keyB1[0] = 0x04 // First nibble = 1 (binary: 000001 << 2 = 00000100)
|
|
keyB1[1] = 0x00 // Second nibble differs
|
|
rand.Read(keyB1[2:])
|
|
|
|
keyB2 := make([]byte, 64)
|
|
keyB2[0] = 0x04 // First nibble = 1
|
|
keyB2[1] = 0x40 // Second nibble differs (binary: 010000...)
|
|
rand.Read(keyB2[2:])
|
|
|
|
// Insert all keys
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
if err := tree.Insert(nil, keyA, val, nil, big.NewInt(1)); err != nil {
|
|
t.Fatalf("Failed to insert keyA: %v", err)
|
|
}
|
|
|
|
rand.Read(val)
|
|
if err := tree.Insert(nil, keyB1, val, nil, big.NewInt(1)); err != nil {
|
|
t.Fatalf("Failed to insert keyB1: %v", err)
|
|
}
|
|
|
|
rand.Read(val)
|
|
if err := tree.Insert(nil, keyB2, val, nil, big.NewInt(1)); err != nil {
|
|
t.Fatalf("Failed to insert keyB2: %v", err)
|
|
}
|
|
|
|
// Verify structure
|
|
nodesBefore := countTreeNodes(t, s, shardKey)
|
|
reachableBefore := countReachableNodes(t, tree)
|
|
t.Logf("Before deletion: stored=%d, reachable=%d", nodesBefore, reachableBefore)
|
|
|
|
if nodesBefore != reachableBefore {
|
|
t.Errorf("STALE NODES before deletion: stored=%d, reachable=%d", nodesBefore, reachableBefore)
|
|
}
|
|
|
|
// Delete keyA - this should trigger the merge of root with child B
|
|
if err := tree.Delete(nil, keyA); err != nil {
|
|
t.Fatalf("Failed to delete keyA: %v", err)
|
|
}
|
|
|
|
// Verify keyA is gone
|
|
if _, err := tree.Get(keyA); err == nil {
|
|
t.Fatal("keyA should not exist after deletion")
|
|
}
|
|
|
|
// Verify B keys still exist
|
|
if _, err := tree.Get(keyB1); err != nil {
|
|
t.Fatalf("keyB1 not found after deletion: %v", err)
|
|
}
|
|
if _, err := tree.Get(keyB2); err != nil {
|
|
t.Fatalf("keyB2 not found after deletion: %v", err)
|
|
}
|
|
|
|
// Check for stale nodes
|
|
nodesAfter := countTreeNodes(t, s, shardKey)
|
|
reachableAfter := countReachableNodes(t, tree)
|
|
t.Logf("After deletion: stored=%d, reachable=%d", nodesAfter, reachableAfter)
|
|
|
|
if nodesAfter != reachableAfter {
|
|
t.Errorf("STALE NODES DETECTED after branch merge: stored=%d, reachable=%d, stale=%d",
|
|
nodesAfter, reachableAfter, nodesAfter-reachableAfter)
|
|
}
|
|
}
|
|
|
|
// TestTreeNoStaleNodesAfterMassDelete tests stale node detection with many keys
|
|
func TestTreeNoStaleNodesAfterMassDeleteNoBLS(t *testing.T) {
|
|
l, _ := zap.NewProduction()
|
|
db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
|
|
s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
|
|
shardKey := tries.ShardKey{}
|
|
tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: shardKey}
|
|
|
|
// Insert 1000 random keys
|
|
numKeys := 1000
|
|
keys := make([][]byte, numKeys)
|
|
|
|
for i := 0; i < numKeys; i++ {
|
|
key := make([]byte, 64)
|
|
rand.Read(key)
|
|
keys[i] = key
|
|
|
|
val := make([]byte, 32)
|
|
rand.Read(val)
|
|
|
|
err := tree.Insert(nil, key, val, nil, big.NewInt(1))
|
|
if err != nil {
|
|
t.Fatalf("Failed to insert key %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
nodesBefore := countTreeNodes(t, s, shardKey)
|
|
reachableBefore := countReachableNodes(t, tree)
|
|
t.Logf("Before deletion: stored=%d, reachable=%d", nodesBefore, reachableBefore)
|
|
|
|
if nodesBefore != reachableBefore {
|
|
t.Errorf("STALE NODES before deletion: stored=%d, reachable=%d", nodesBefore, reachableBefore)
|
|
}
|
|
|
|
// Delete half the keys in random order
|
|
mrand.Shuffle(numKeys, func(i, j int) {
|
|
keys[i], keys[j] = keys[j], keys[i]
|
|
})
|
|
|
|
for i := 0; i < numKeys/2; i++ {
|
|
err := tree.Delete(nil, keys[i])
|
|
if err != nil {
|
|
t.Fatalf("Failed to delete key %d: %v", i, err)
|
|
}
|
|
}
|
|
|
|
nodesAfter := countTreeNodes(t, s, shardKey)
|
|
reachableAfter := countReachableNodes(t, tree)
|
|
t.Logf("After deleting half: stored=%d, reachable=%d", nodesAfter, reachableAfter)
|
|
|
|
if nodesAfter != reachableAfter {
|
|
t.Errorf("STALE NODES DETECTED: stored=%d, reachable=%d, stale=%d",
|
|
nodesAfter, reachableAfter, nodesAfter-reachableAfter)
|
|
}
|
|
|
|
// Verify remaining keys still accessible
|
|
for i := numKeys / 2; i < numKeys; i++ {
|
|
_, err := tree.Get(keys[i])
|
|
if err != nil {
|
|
t.Errorf("Key %d not found after mass deletion: %v", i, err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// countTreeNodes counts all tree nodes stored in the database for a given shard
|
|
func countTreeNodes(t *testing.T, s *store.PebbleHypergraphStore, shardKey tries.ShardKey) int {
|
|
count := 0
|
|
|
|
// Use the store's iterator to count nodes
|
|
iter, err := s.IterateRawLeaves("vertex", "adds", shardKey)
|
|
if err != nil {
|
|
t.Fatalf("Failed to create iterator: %v", err)
|
|
}
|
|
defer iter.Close()
|
|
|
|
// Count all entries (both branch and leaf nodes)
|
|
for valid := iter.First(); valid; valid = iter.Next() {
|
|
count++
|
|
}
|
|
|
|
return count
|
|
}
|
|
|
|
// countReachableNodes counts nodes reachable from the tree root by walking the tree
|
|
func countReachableNodes(t *testing.T, tree *tries.LazyVectorCommitmentTree) int {
|
|
if tree.Root == nil {
|
|
return 0
|
|
}
|
|
|
|
count := 0
|
|
var walk func(node tries.LazyVectorCommitmentNode)
|
|
walk = func(node tries.LazyVectorCommitmentNode) {
|
|
if node == nil {
|
|
return
|
|
}
|
|
count++
|
|
|
|
switch n := node.(type) {
|
|
case *tries.LazyVectorCommitmentBranchNode:
|
|
for i := 0; i < 64; i++ {
|
|
child := n.Children[i]
|
|
if child == nil {
|
|
// Try to load from store
|
|
var err error
|
|
child, err = tree.Store.GetNodeByPath(
|
|
tree.SetType,
|
|
tree.PhaseType,
|
|
tree.ShardKey,
|
|
append(n.FullPrefix, i),
|
|
)
|
|
if err != nil {
|
|
continue
|
|
}
|
|
}
|
|
if child != nil {
|
|
walk(child)
|
|
}
|
|
}
|
|
case *tries.LazyVectorCommitmentLeafNode:
|
|
// Leaf node, nothing more to walk
|
|
}
|
|
}
|
|
|
|
walk(tree.Root)
|
|
return count
|
|
}
|
|
|
|
// TestTreeBranchStructure tests that the tree structure is preserved after
// adding and removing leaves that cause branch creation due to shared prefixes.
// It verifies the tree's size bookkeeping across: (1) a single branch-creating
// insert and its deletion, and (2) bulk inserts of prefix-sharing groups
// followed by removal of one whole group.
func TestTreeBranchStructureNoBLS(t *testing.T) {
	// Fresh in-memory store and empty tree for this test.
	l, _ := zap.NewProduction()
	db := store.NewPebbleDB(l, &config.Config{DB: &config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}}, 0)
	s := store.NewPebbleHypergraphStore(&config.DBConfig{InMemoryDONOTUSE: true, Path: ".configtest/store"}, db, l, verencr, nil)
	tree := &tries.LazyVectorCommitmentTree{Store: s, SetType: "vertex", PhaseType: "adds", ShardKey: tries.ShardKey{}}

	// Create three base keys with 64-byte size
	numBaseKeys := 3
	baseKeys := make([][]byte, numBaseKeys)
	baseValues := make([][]byte, numBaseKeys)

	// Create base keys that share same first byte and have a controlled second and
	// third, but are otherwise random
	for i := 0; i < numBaseKeys; i++ {
		key := make([]byte, 64)
		// Bit layout of the first path nibbles: 101000 001010 0000
		// (the tree appears to use 6-bit path elements — 64 children per branch)
		key[0] = 0xA0 // Common first byte
		key[1] = 0xA0
		// finalizes the third path nibble as i -> 0, 1, 2
		key[2] = byte((i << 6) & 0xFF)
		// Remainder is random so leaves diverge below the controlled prefix.
		// NOTE(review): rand.Read error is ignored — acceptable in a test,
		// but a failure would silently yield zeroed key bytes.
		rand.Read(key[3:])
		baseKeys[i] = key

		val := make([]byte, 32)
		rand.Read(val)
		baseValues[i] = val

		err := tree.Insert(nil, key, val, nil, big.NewInt(1))
		if err != nil {
			t.Errorf("Failed to insert base key %d: %v", i, err)
		}
	}

	// Copy the size value to avoid aliasing (GetSize returns pointer to internal big.Int)
	initialSize := new(big.Int).Set(tree.GetSize())

	// Confirm initial state
	if initialSize.Cmp(big.NewInt(3)) != 0 {
		t.Errorf("Expected initial size of 3, got %s", initialSize.String())
	}

	// Add a key that forces a new branch creation due to shared prefix with
	// baseKeys[0]
	branchKey := make([]byte, 64)
	copy(branchKey, baseKeys[0]) // Start with same bytes as baseKeys[0]
	// Modify just one byte in the middle to create branch point
	branchKey[32] ^= 0xFF
	branchValue := make([]byte, 32)
	rand.Read(branchValue)

	err := tree.Insert(nil, branchKey, branchValue, nil, big.NewInt(1))
	if err != nil {
		t.Errorf("Failed to insert branch-creating key: %v", err)
	}

	// Size check after adding the branch-creating key.
	// (Safe to use GetSize's aliased pointer here: it is compared before any
	// further mutation of the tree.)
	branchSize := tree.GetSize()

	// Confirm size increased
	if branchSize.Cmp(big.NewInt(4)) != 0 {
		t.Errorf("Expected size of 4 after adding branch key, got %s", branchSize.String())
	}

	// Remove the key that created the branch
	err = tree.Delete(nil, branchKey)
	if err != nil {
		t.Errorf("Failed to delete branch-creating key: %v", err)
	}

	// Size check after removing the branch-creating key.
	restoredSize := tree.GetSize()
	// Confirm size returned to original
	if restoredSize.Cmp(initialSize) != 0 {
		t.Errorf("Expected size to return to %s after removing branch key, got %s",
			initialSize.String(), restoredSize.String())
	}

	// More complex test with multiple branch levels
	// Create groups of keys with controlled prefixes
	numGroups := int64(2)
	keysPerGroup := int64(500)
	groupPrefixLength := 16 // First 16 bytes should be identical within a group

	groupKeys := make([][][]byte, numGroups)
	for i := int64(0); i < numGroups; i++ {
		// Create group prefix - first 16 bytes are the same within each group
		groupPrefix := make([]byte, groupPrefixLength)
		rand.Read(groupPrefix)
		// Ensure that the group is out of band of the initial set.
		// (Base keys start with 0xA0, so 0xFF guarantees a disjoint subtree.)
		groupPrefix[0] = 0xFF

		// Now create the keys for this group
		groupKeys[i] = make([][]byte, keysPerGroup)
		for j := int64(0); j < keysPerGroup; j++ {
			key := make([]byte, 64)
			// Copy the group prefix
			copy(key[:groupPrefixLength], groupPrefix)
			// Fill the rest with random bytes
			rand.Read(key[groupPrefixLength:])
			groupKeys[i][j] = key

			val := make([]byte, 32)
			rand.Read(val)

			err := tree.Insert(nil, key, val, nil, big.NewInt(1))
			if err != nil {
				t.Errorf("Failed to insert complex key [group %d, key %d]: %v", i, j, err)
			}
		}
	}

	// Expect the 3 base keys plus every group key to be counted.
	complexSize := tree.GetSize()
	expectedComplexSize := big.NewInt(3 + numGroups*keysPerGroup)
	if complexSize.Cmp(expectedComplexSize) != 0 {
		t.Errorf("Expected complex tree size of %s, got %s",
			expectedComplexSize.String(), complexSize.String())
	}

	// Remove just one group
	for j := int64(0); j < keysPerGroup; j++ {
		err := tree.Delete(nil, groupKeys[0][j])
		if err != nil {
			t.Errorf("Failed to delete key from group 0: %v", err)
		}
	}

	// Size should drop by exactly one group's worth of keys.
	afterGroupRemoval := tree.GetSize()
	expectedAfterRemoval := big.NewInt(3 + keysPerGroup)
	if afterGroupRemoval.Cmp(expectedAfterRemoval) != 0 {
		t.Errorf("Expected tree size of %s after group removal, got %s",
			expectedAfterRemoval.String(), afterGroupRemoval.String())
	}
}
|