Mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git (synced 2026-02-21 18:37:26 +08:00)

Commit e896a0fb60: perf grinder
Parent: ccf1990a81
@@ -548,14 +548,5 @@ func PrintLogo() {
}

func PrintVersion(network uint8) {
  patch := GetPatchNumber()
  patchString := ""
  if patch != 0x00 {
    patchString = fmt.Sprintf("-p%d", patch)
  }
  if network != 0 {
    patchString = fmt.Sprintf("-b%d", GetRCNumber())
  }
  fmt.Println(" ")
  fmt.Println(" Quilibrium Node - v" + GetVersionString() + patchString + " – Dusk")
  fmt.Println("Quilibrium Perf Grinder")
}
node/crypto/proof_tree.go (new file, 509 lines)
@@ -0,0 +1,509 @@
package crypto

import (
  "bytes"
  "crypto/sha512"
  "encoding/binary"
  "encoding/gob"
  "errors"
  "fmt"

  rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
)

func init() {
  gob.Register(&VectorCommitmentLeafNode{})
  gob.Register(&VectorCommitmentBranchNode{})
}

const (
  BranchNodes = 1024
  BranchBits  = 10 // log2(1024)
  BranchMask  = BranchNodes - 1
)
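
The constants pin down the trie geometry: each branch has 1024 children, so every level consumes 10 bits of the key and a 32-byte key walks at most 26 levels. A small sketch of that arithmetic, written as an additional test in this package (not part of the commit):

package crypto

import "testing"

func TestBranchParametersSketch(t *testing.T) {
  if BranchNodes != 1<<BranchBits {
    t.Fatal("BranchNodes should equal 2^BranchBits")
  }
  if BranchMask != BranchNodes-1 {
    t.Fatal("BranchMask should select the low BranchBits bits of a nibble")
  }
  // A 32-byte (256-bit) key yields ceil(256/10) = 26 branch indices,
  // with the final index padded by zero bits.
  keyBits := 32 * 8
  levels := (keyBits + BranchBits - 1) / BranchBits
  if levels != 26 {
    t.Fatalf("expected 26 levels for a 32-byte key, got %d", levels)
  }
}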

type VectorCommitmentNode interface {
  Commit() []byte
}

type VectorCommitmentLeafNode struct {
  Key        []byte
  Value      []byte
  Commitment []byte
}

type VectorCommitmentBranchNode struct {
  Prefix     []int
  Children   [BranchNodes]VectorCommitmentNode
  Commitment []byte
}

// Commit returns the leaf's commitment, computing and caching the
// domain-separated SHA-512 digest over the key and value if needed.
func (n *VectorCommitmentLeafNode) Commit() []byte {
  if n.Commitment == nil {
    h := sha512.New()
    h.Write([]byte{0})
    h.Write(n.Key)
    h.Write(n.Value)
    n.Commitment = h.Sum(nil)
  }
  return n.Commitment
}

// Commit returns the branch's commitment, committing to one 64-byte slot
// per child (zero bytes for empty slots) and caching the result.
func (n *VectorCommitmentBranchNode) Commit() []byte {
  if n.Commitment == nil {
    data := []byte{}
    for _, child := range n.Children {
      if child != nil {
        out := child.Commit()
        switch c := child.(type) {
        case *VectorCommitmentBranchNode:
          h := sha512.New()
          h.Write([]byte{1})
          for _, p := range c.Prefix {
            h.Write(binary.BigEndian.AppendUint32([]byte{}, uint32(p)))
          }
          h.Write(out)
          out = h.Sum(nil)
        case *VectorCommitmentLeafNode:
          // do nothing
        }
        data = append(data, out...)
      } else {
        data = append(data, make([]byte, 64)...)
      }
    }

    n.Commitment = rbls48581.CommitRaw(data, 1024)
  }

  return n.Commitment
}
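
Commit serializes the branch into a fixed 1024-slot, 64-bytes-per-slot buffer before handing it to rbls48581.CommitRaw: a leaf child contributes its own SHA-512 commitment, a branch child is re-hashed under a 0x01 domain byte together with its prefix, and an empty slot contributes 64 zero bytes. A hash-level sketch of the leaf slot layout, written as a test in this package (no BLS setup needed; not part of the commit):

package crypto

import (
  "bytes"
  "crypto/sha512"
  "testing"
)

func TestLeafSlotLayoutSketch(t *testing.T) {
  leaf := &VectorCommitmentLeafNode{Key: []byte("key1"), Value: []byte("value1")}
  // A leaf's slot is SHA-512 over a 0x00 domain byte, the key, and the value.
  h := sha512.New()
  h.Write([]byte{0})
  h.Write(leaf.Key)
  h.Write(leaf.Value)
  want := h.Sum(nil)
  if len(want) != 64 {
    t.Fatal("each child slot in a branch is one 64-byte digest")
  }
  if !bytes.Equal(leaf.Commit(), want) {
    t.Fatal("leaf commitment should match the domain-separated digest")
  }
}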

// Verify checks a single child opening against this branch's commitment,
// recomputing the commitment from the children if it has not been cached.
func (n *VectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
  data := []byte{}
  if n.Commitment == nil {
    for _, child := range n.Children {
      if child != nil {
        out := child.Commit()
        switch c := child.(type) {
        case *VectorCommitmentBranchNode:
          h := sha512.New()
          h.Write([]byte{1})
          for _, p := range c.Prefix {
            h.Write(binary.BigEndian.AppendUint32([]byte{}, uint32(p)))
          }
          h.Write(out)
          out = h.Sum(nil)
        case *VectorCommitmentLeafNode:
          // do nothing
        }
        data = append(data, out...)
      } else {
        data = append(data, make([]byte, 64)...)
      }
    }

    n.Commitment = rbls48581.CommitRaw(data, 1024)
    data = data[64*index : 64*(index+1)]
  } else {
    child := n.Children[index]
    if child != nil {
      out := child.Commit()
      switch c := child.(type) {
      case *VectorCommitmentBranchNode:
        h := sha512.New()
        h.Write([]byte{1})
        for _, p := range c.Prefix {
          h.Write(binary.BigEndian.AppendUint32([]byte{}, uint32(p)))
        }
        h.Write(out)
        out = h.Sum(nil)
      case *VectorCommitmentLeafNode:
        // do nothing
      }
      data = append(data, out...)
    } else {
      data = append(data, make([]byte, 64)...)
    }
  }

  return rbls48581.VerifyRaw(data, n.Commitment, uint64(index), proof, 1024)
}

// Prove produces an opening for the child slot at index over the branch's
// serialized child data.
func (n *VectorCommitmentBranchNode) Prove(index int) []byte {
  data := []byte{}
  for _, child := range n.Children {
    if child != nil {
      out := child.Commit()
      switch c := child.(type) {
      case *VectorCommitmentBranchNode:
        h := sha512.New()
        h.Write([]byte{1})
        for _, p := range c.Prefix {
          h.Write(binary.BigEndian.AppendUint32([]byte{}, uint32(p)))
        }
        h.Write(out)
        out = h.Sum(nil)
      case *VectorCommitmentLeafNode:
        // do nothing
      }
      data = append(data, out...)
    } else {
      data = append(data, make([]byte, 64)...)
    }
  }

  return rbls48581.ProveRaw(data, uint64(index), 1024)
}

type VectorCommitmentTree struct {
  Root VectorCommitmentNode
}

// getNextNibble returns the next BranchBits bits from the key starting at pos
func getNextNibble(key []byte, pos int) int {
  startByte := pos / 8
  if startByte >= len(key) {
    return 0
  }

  // Calculate how many bits we need from the current byte
  startBit := pos % 8
  bitsFromCurrentByte := 8 - startBit

  result := int(key[startByte] & ((1 << bitsFromCurrentByte) - 1))

  if bitsFromCurrentByte >= BranchBits {
    // We have enough bits in the current byte
    return (result >> (bitsFromCurrentByte - BranchBits)) & BranchMask
  }

  // We need bits from the next byte
  result = result << (BranchBits - bitsFromCurrentByte)
  if startByte+1 < len(key) {
    remainingBits := BranchBits - bitsFromCurrentByte
    nextByte := int(key[startByte+1])
    result |= (nextByte >> (8 - remainingBits))
  }

  return result & BranchMask
}
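
Since BranchBits is 10 and bytes hold 8 bits, a nibble spans at most two adjacent bytes, and any position at or past the end of the key reads as zero. A hand-checked sketch in test form (values worked out from the bit layout; not part of the commit):

package crypto

import "testing"

func TestGetNextNibbleWorkedExample(t *testing.T) {
  key := []byte{0xff, 0xc0} // bits: 11111111 11000000
  if got := getNextNibble(key, 0); got != 1023 {
    t.Errorf("bits 0..9 are 1111111111 (1023), got %d", got)
  }
  if got := getNextNibble(key, 10); got != 0 {
    t.Errorf("bits 10..15 are 000000 and the rest is zero padding, got %d", got)
  }
  if got := getNextNibble(key, 40); got != 0 {
    t.Errorf("positions past the key should read as 0, got %d", got)
  }
}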

// getNibblesUntilDiverge collects the BranchBits-sized nibbles two distinct
// keys share, starting at startDepth, and returns them together with the bit
// offset at which the keys diverge.
func getNibblesUntilDiverge(key1, key2 []byte, startDepth int) ([]int, int) {
  var nibbles []int
  depth := startDepth

  for {
    n1 := getNextNibble(key1, depth)
    n2 := getNextNibble(key2, depth)
    if n1 != n2 {
      return nibbles, depth
    }
    nibbles = append(nibbles, n1)
    depth += BranchBits
  }
}
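
getNibblesUntilDiverge is only reached from Insert after the equal-key case has been handled, so the two keys are guaranteed to differ and the loop always terminates at the first differing 10-bit group. A hand-checked sketch in test form (not part of the commit):

package crypto

import "testing"

func TestNibblesUntilDivergeWorkedExample(t *testing.T) {
  // The keys share their first 10 bits and differ in the second group.
  key1 := []byte{0xaa, 0x00, 0x01}
  key2 := []byte{0xaa, 0x10, 0x01}
  shared, depth := getNibblesUntilDiverge(key1, key2, 0)
  if len(shared) != 1 || shared[0] != getNextNibble(key1, 0) {
    t.Fatalf("expected exactly the first nibble to be shared, got %v", shared)
  }
  if depth != BranchBits {
    t.Fatalf("divergence should be at bit offset %d, got %d", BranchBits, depth)
  }
}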

// getLastNibble returns the final nibble after applying a prefix
func getLastNibble(key []byte, prefixLen int) int {
  return getNextNibble(key, prefixLen*BranchBits)
}

// Insert adds or updates a key-value pair in the tree
func (t *VectorCommitmentTree) Insert(key, value []byte) error {
  if len(key) == 0 {
    return errors.New("empty key not allowed")
  }

  var insert func(node VectorCommitmentNode, depth int) VectorCommitmentNode
  insert = func(node VectorCommitmentNode, depth int) VectorCommitmentNode {
    if node == nil {
      return &VectorCommitmentLeafNode{Key: key, Value: value}
    }

    switch n := node.(type) {
    case *VectorCommitmentLeafNode:
      if bytes.Equal(n.Key, key) {
        n.Value = value
        n.Commitment = nil
        return n
      }

      // Get common prefix nibbles and divergence point
      sharedNibbles, divergeDepth := getNibblesUntilDiverge(n.Key, key, depth)

      // Create single branch node with shared prefix
      branch := &VectorCommitmentBranchNode{
        Prefix: sharedNibbles,
      }

      // Add both leaves at their final positions
      finalOldNibble := getNextNibble(n.Key, divergeDepth)
      finalNewNibble := getNextNibble(key, divergeDepth)
      branch.Children[finalOldNibble] = n
      branch.Children[finalNewNibble] = &VectorCommitmentLeafNode{Key: key, Value: value}

      return branch

    case *VectorCommitmentBranchNode:
      if len(n.Prefix) > 0 {
        // Check if the new key matches the prefix
        for i, expectedNibble := range n.Prefix {
          actualNibble := getNextNibble(key, depth+i*BranchBits)
          if actualNibble != expectedNibble {
            // Create new branch with shared prefix subset
            newBranch := &VectorCommitmentBranchNode{
              Prefix: n.Prefix[:i],
            }
            // Position old branch and new leaf
            newBranch.Children[expectedNibble] = n
            n.Prefix = n.Prefix[i+1:] // remove shared prefix from old branch
            newBranch.Children[actualNibble] = &VectorCommitmentLeafNode{Key: key, Value: value}
            return newBranch
          }
        }
        // Key matches prefix, continue with final nibble
        finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
        n.Children[finalNibble] = insert(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)
        n.Commitment = nil
        return n
      } else {
        // Simple branch without prefix
        nibble := getNextNibble(key, depth)
        n.Children[nibble] = insert(n.Children[nibble], depth+BranchBits)
        n.Commitment = nil
        return n
      }
    }

    return nil
  }

  t.Root = insert(t.Root, 0)
  return nil
}
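
Insert keeps the tree lazily shaped: the first key becomes a bare leaf at the root, and a second, different key only materializes a branch at the point of divergence, with any shared 10-bit groups compressed into the branch's Prefix. A usage sketch in test form that stays at the hash level, so no BLS initialization is required (not part of the commit):

package crypto

import (
  "bytes"
  "testing"
)

func TestInsertAndGetSketch(t *testing.T) {
  tree := &VectorCommitmentTree{}
  if err := tree.Insert([]byte("alpha"), []byte("1")); err != nil {
    t.Fatal(err)
  }
  // Re-inserting the same key overwrites the value and clears the cached
  // commitment so the next Commit recomputes the root.
  if err := tree.Insert([]byte("alpha"), []byte("2")); err != nil {
    t.Fatal(err)
  }
  if err := tree.Insert([]byte("beta"), []byte("3")); err != nil {
    t.Fatal(err)
  }
  got, err := tree.Get([]byte("alpha"))
  if err != nil || !bytes.Equal(got, []byte("2")) {
    t.Fatalf("expected the updated value, got %q (err %v)", got, err)
  }
  // Two distinct keys force a branch at the root.
  if _, ok := tree.Root.(*VectorCommitmentBranchNode); !ok {
    t.Fatal("expected a branch node at the root after two inserts")
  }
}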

// Verify checks a proof path produced by Prove for the given key: one
// opening per branch along the path, with the leaf value as the final element.
func (t *VectorCommitmentTree) Verify(key []byte, proofs [][]byte) bool {
  if len(key) == 0 {
    return false
  }

  var verify func(node VectorCommitmentNode, proofs [][]byte, depth int) bool
  verify = func(node VectorCommitmentNode, proofs [][]byte, depth int) bool {
    if node == nil {
      return false
    }

    if len(proofs) == 0 {
      return false
    }

    switch n := node.(type) {
    case *VectorCommitmentLeafNode:
      if bytes.Equal(n.Key, key) {
        return bytes.Equal(n.Value, proofs[0])
      }
      return false

    case *VectorCommitmentBranchNode:
      // Check prefix match
      for i, expectedNibble := range n.Prefix {
        if getNextNibble(key, depth+i*BranchBits) != expectedNibble {
          return false
        }
      }

      // Get final nibble after prefix
      finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)

      if !n.Verify(finalNibble, proofs[0]) {
        return false
      }

      return verify(n.Children[finalNibble], proofs[1:], depth+len(n.Prefix)*BranchBits+BranchBits)
    }

    return false
  }

  return verify(t.Root, proofs, 0)
}

// Prove builds a proof path for key: an opening for each branch traversed,
// followed by the leaf's value.
func (t *VectorCommitmentTree) Prove(key []byte) [][]byte {
  if len(key) == 0 {
    return nil
  }

  var prove func(node VectorCommitmentNode, depth int) [][]byte
  prove = func(node VectorCommitmentNode, depth int) [][]byte {
    if node == nil {
      return nil
    }

    switch n := node.(type) {
    case *VectorCommitmentLeafNode:
      if bytes.Equal(n.Key, key) {
        return [][]byte{n.Value}
      }
      return nil

    case *VectorCommitmentBranchNode:
      // Check prefix match
      for i, expectedNibble := range n.Prefix {
        if getNextNibble(key, depth+i*BranchBits) != expectedNibble {
          return nil
        }
      }

      // Get final nibble after prefix
      finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)

      proofs := [][]byte{n.Prove(finalNibble)}

      return append(proofs, prove(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)...)
    }

    return nil
  }

  return prove(t.Root, 0)
}
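
A tree proof is a path: one branch opening per level on the way down to the key, with the claimed leaf value as the final element, and Verify replays the same walk, checking each opening against the branch commitment before descending. A usage sketch in test form; like TestVectorCommitmentTrees below it assumes the bls48581 backend has been initialized, since Prove and Verify call into it (not part of the commit):

package crypto

import (
  "testing"

  "source.quilibrium.com/quilibrium/monorepo/bls48581/generated/bls48581"
)

func TestProveVerifySketch(t *testing.T) {
  bls48581.Init() // required before any commitment or proof, as in TestVectorCommitmentTrees
  tree := &VectorCommitmentTree{}
  for _, k := range []string{"alpha", "beta", "gamma"} {
    if err := tree.Insert([]byte(k), []byte(k)); err != nil {
      t.Fatal(err)
    }
  }
  proofs := tree.Prove([]byte("beta"))
  if len(proofs) < 2 {
    t.Fatalf("expected at least one branch opening plus the leaf value, got %d elements", len(proofs))
  }
  if !tree.Verify([]byte("beta"), proofs) {
    t.Fatal("proof for an inserted key should verify")
  }
  // Tampering with the final element, the claimed leaf value, must fail.
  bad := append([][]byte{}, proofs...)
  bad[len(bad)-1] = []byte("not-beta")
  if tree.Verify([]byte("beta"), bad) {
    t.Fatal("a tampered leaf value should not verify")
  }
}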

// Get retrieves a value from the tree by key
func (t *VectorCommitmentTree) Get(key []byte) ([]byte, error) {
  if len(key) == 0 {
    return nil, errors.New("empty key not allowed")
  }

  var get func(node VectorCommitmentNode, depth int) []byte
  get = func(node VectorCommitmentNode, depth int) []byte {
    if node == nil {
      return nil
    }

    switch n := node.(type) {
    case *VectorCommitmentLeafNode:
      if bytes.Equal(n.Key, key) {
        return n.Value
      }
      return nil

    case *VectorCommitmentBranchNode:
      // Check prefix match
      for i, expectedNibble := range n.Prefix {
        if getNextNibble(key, depth+i*BranchBits) != expectedNibble {
          return nil
        }
      }
      // Get final nibble after prefix
      finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
      return get(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)
    }

    return nil
  }

  value := get(t.Root, 0)
  if value == nil {
    return nil, errors.New("key not found")
  }
  return value, nil
}

// Delete removes a key-value pair from the tree
func (t *VectorCommitmentTree) Delete(key []byte) error {
  if len(key) == 0 {
    return errors.New("empty key not allowed")
  }

  var delete func(node VectorCommitmentNode, depth int) VectorCommitmentNode
  delete = func(node VectorCommitmentNode, depth int) VectorCommitmentNode {
    if node == nil {
      return nil
    }

    switch n := node.(type) {
    case *VectorCommitmentLeafNode:
      if bytes.Equal(n.Key, key) {
        return nil
      }
      return n

    case *VectorCommitmentBranchNode:
      // Check prefix match
      for i, expectedNibble := range n.Prefix {
        currentNibble := getNextNibble(key, depth+i*BranchBits)
        if currentNibble != expectedNibble {
          return n // Key doesn't match prefix, nothing to delete
        }
      }

      // Delete at final position after prefix
      finalNibble := getNextNibble(key, depth+len(n.Prefix)*BranchBits)
      n.Children[finalNibble] = delete(n.Children[finalNibble], depth+len(n.Prefix)*BranchBits+BranchBits)
      n.Commitment = nil

      // Count remaining children
      childCount := 0
      var lastChild VectorCommitmentNode
      var lastIndex int
      for i, child := range n.Children {
        if child != nil {
          childCount++
          lastChild = child
          lastIndex = i
        }
      }

      if childCount == 0 {
        return nil
      } else if childCount == 1 {
        // If the only child is a leaf, keep structure if its path matches
        if leaf, ok := lastChild.(*VectorCommitmentLeafNode); ok {
          if lastIndex == getLastNibble(leaf.Key, len(n.Prefix)) {
            return n
          }
          return leaf
        }
        // If it's a branch, merge the prefixes
        if branch, ok := lastChild.(*VectorCommitmentBranchNode); ok {
          branch.Prefix = append(n.Prefix, branch.Prefix...)
          return branch
        }
      }
      return n
    }

    return nil
  }

  t.Root = delete(t.Root, 0)
  return nil
}
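
Delete prunes on the way back up: an emptied branch collapses to nil, a branch left with a single leaf either keeps its shape (when the leaf still belongs at that slot) or is replaced by the leaf, and a branch left with a single branch child is merged by concatenating prefixes. A hash-level usage sketch in test form (not part of the commit):

package crypto

import "testing"

func TestDeleteCollapsesSketch(t *testing.T) {
  tree := &VectorCommitmentTree{}
  for _, k := range []string{"alpha", "beta"} {
    if err := tree.Insert([]byte(k), []byte(k)); err != nil {
      t.Fatal(err)
    }
  }
  if err := tree.Delete([]byte("alpha")); err != nil {
    t.Fatal(err)
  }
  if _, err := tree.Get([]byte("alpha")); err == nil {
    t.Fatal("deleted key should no longer resolve")
  }
  if v, err := tree.Get([]byte("beta")); err != nil || string(v) != "beta" {
    t.Fatalf("remaining key should be untouched, got %q (err %v)", v, err)
  }
  if err := tree.Delete([]byte("beta")); err != nil {
    t.Fatal(err)
  }
  if tree.Root != nil {
    t.Fatal("removing every key should leave an empty root")
  }
}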

// Commit returns the root commitment of the tree
func (t *VectorCommitmentTree) Commit() []byte {
  if t.Root == nil {
    return make([]byte, 64)
  }
  return t.Root.Commit()
}
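
For an empty tree the root commitment is defined as 64 zero bytes, matching the size of one SHA-512 child slot, so callers always receive a fixed-width value. A one-assertion sketch in test form (not part of the commit):

package crypto

import (
  "bytes"
  "testing"
)

func TestEmptyTreeCommitSketch(t *testing.T) {
  tree := &VectorCommitmentTree{}
  if !bytes.Equal(tree.Commit(), make([]byte, 64)) {
    t.Fatal("an empty tree should commit to 64 zero bytes")
  }
}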

// debugNode prints the subtree rooted at node for debugging, indenting each
// level with the accumulated prefix string.
func debugNode(node VectorCommitmentNode, depth int, prefix string) {
  if node == nil {
    return
  }

  switch n := node.(type) {
  case *VectorCommitmentLeafNode:
    fmt.Printf("%sLeaf: key=%x value=%x\n", prefix, n.Key, n.Value)
  case *VectorCommitmentBranchNode:
    fmt.Printf("%sBranch %v:\n", prefix, n.Prefix)
    for i, child := range n.Children {
      if child != nil {
        fmt.Printf("%s [%d]:\n", prefix, i)
        debugNode(child, depth+1, prefix+"  ")
      }
    }
  }
}
node/crypto/proof_tree_test.go (new file, 312 lines)
@@ -0,0 +1,312 @@

package crypto

import (
  "bytes"
  "crypto/rand"
  "fmt"
  "testing"

  "source.quilibrium.com/quilibrium/monorepo/bls48581/generated/bls48581"
)

func BenchmarkVectorCommitmentTreeInsert(b *testing.B) {
  tree := &VectorCommitmentTree{}
  addresses := [][]byte{}

  for i := range b.N {
    d := make([]byte, 32)
    rand.Read(d)
    addresses = append(addresses, d)
    err := tree.Insert(d, d)
    if err != nil {
      b.Errorf("Failed to insert item %d: %v", i, err)
    }
  }
}

func BenchmarkVectorCommitmentTreeCommit(b *testing.B) {
  tree := &VectorCommitmentTree{}
  addresses := [][]byte{}

  for i := range b.N {
    d := make([]byte, 32)
    rand.Read(d)
    addresses = append(addresses, d)
    err := tree.Insert(d, d)
    if err != nil {
      b.Errorf("Failed to insert item %d: %v", i, err)
    }
    tree.Commit()
  }
}

func BenchmarkVectorCommitmentTreeProve(b *testing.B) {
  tree := &VectorCommitmentTree{}
  addresses := [][]byte{}

  for i := range b.N {
    d := make([]byte, 32)
    rand.Read(d)
    addresses = append(addresses, d)
    err := tree.Insert(d, d)
    if err != nil {
      b.Errorf("Failed to insert item %d: %v", i, err)
    }
    tree.Prove(d)
  }
}

func BenchmarkVectorCommitmentTreeVerify(b *testing.B) {
  tree := &VectorCommitmentTree{}
  addresses := [][]byte{}

  for i := range b.N {
    d := make([]byte, 32)
    rand.Read(d)
    addresses = append(addresses, d)
    err := tree.Insert(d, d)
    if err != nil {
      b.Errorf("Failed to insert item %d: %v", i, err)
    }
    p := tree.Prove(d)
    if !tree.Verify(d, p) {
      b.Errorf("bad proof")
    }
  }
}

func TestVectorCommitmentTrees(t *testing.T) {
  bls48581.Init()
  tree := &VectorCommitmentTree{}

  // Test single insert
  err := tree.Insert([]byte("key1"), []byte("value1"))
  if err != nil {
    t.Errorf("Failed to insert: %v", err)
  }

  // Test duplicate key
  err = tree.Insert([]byte("key1"), []byte("value2"))
  if err != nil {
    t.Errorf("Failed to update existing key: %v", err)
  }

  value, err := tree.Get([]byte("key1"))
  if err != nil {
    t.Errorf("Failed to get value: %v", err)
  }
  if !bytes.Equal(value, []byte("value2")) {
    t.Errorf("Expected value2, got %s", string(value))
  }

  // Test empty key
  err = tree.Insert([]byte{}, []byte("value"))
  if err == nil {
    t.Error("Expected error for empty key, got none")
  }

  tree = &VectorCommitmentTree{}

  // Test get on empty tree
  _, err = tree.Get([]byte("nonexistent"))
  if err == nil {
    t.Error("Expected error for nonexistent key, got none")
  }

  // Insert and get
  tree.Insert([]byte("key1"), []byte("value1"))
  value, err = tree.Get([]byte("key1"))
  if err != nil {
    t.Errorf("Failed to get value: %v", err)
  }
  if !bytes.Equal(value, []byte("value1")) {
    t.Errorf("Expected value1, got %s", string(value))
  }

  // Test empty key
  _, err = tree.Get([]byte{})
  if err == nil {
    t.Error("Expected error for empty key, got none")
  }

  tree = &VectorCommitmentTree{}

  // Test delete on empty tree
  err = tree.Delete([]byte("nonexistent"))
  if err != nil {
    t.Errorf("Delete on empty tree should not return error: %v", err)
  }

  // Insert and delete
  tree.Insert([]byte("key1"), []byte("value1"))
  err = tree.Delete([]byte("key1"))
  if err != nil {
    t.Errorf("Failed to delete: %v", err)
  }

  // Verify deletion
  _, err = tree.Get([]byte("key1"))
  if err == nil {
    t.Error("Expected error for deleted key, got none")
  }

  // Test empty key
  err = tree.Delete([]byte{})
  if err == nil {
    t.Error("Expected error for empty key, got none")
  }

  tree = &VectorCommitmentTree{}

  // Insert keys that share common prefix
  keys := []string{
    "key1",
    "key2",
    "key3",
    "completely_different",
  }

  for i, key := range keys {
    err := tree.Insert([]byte(key), []byte("value"+string(rune('1'+i))))
    if err != nil {
      t.Errorf("Failed to insert key %s: %v", key, err)
    }
  }

  // Verify all values
  for i, key := range keys {
    value, err := tree.Get([]byte(key))
    if err != nil {
      t.Errorf("Failed to get key %s: %v", key, err)
    }
    expected := []byte("value" + string(rune('1'+i)))
    if !bytes.Equal(value, expected) {
      t.Errorf("Expected %s, got %s", string(expected), string(value))
    }
  }

  // Delete middle key
  err = tree.Delete([]byte("key2"))
  if err != nil {
    t.Errorf("Failed to delete key2: %v", err)
  }

  // Verify key2 is gone but others remain
  _, err = tree.Get([]byte("key2"))
  if err == nil {
    t.Error("Expected error for deleted key2, got none")
  }

  // Check remaining keys
  remainingKeys := []string{"key1", "key3", "completely_different"}
  remainingValues := []string{"value1", "value3", "value4"}
  for i, key := range remainingKeys {
    value, err := tree.Get([]byte(key))
    if err != nil {
      t.Errorf("Failed to get key %s after deletion: %v", key, err)
    }
    expected := []byte(remainingValues[i])
    if !bytes.Equal(value, expected) {
      t.Errorf("Expected %s, got %s", string(expected), string(value))
    }
  }

  tree = &VectorCommitmentTree{}

  // Empty tree should be empty
  emptyRoot := tree.Root
  if emptyRoot != nil {
    t.Errorf("Expected empty root")
  }

  // Root should change after insert
  tree.Insert([]byte("key1"), []byte("value1"))
  firstRoot := tree.Root.Commit()

  if bytes.Equal(firstRoot, bytes.Repeat([]byte{0x00}, 64)) {
    t.Error("Root hash should change after insert")
  }

  // Root should change after update
  tree.Insert([]byte("key1"), []byte("value2"))
  secondRoot := tree.Root.Commit()

  if bytes.Equal(secondRoot, firstRoot) {
    t.Error("Root hash should change after update")
  }

  // Root should change after delete
  tree.Delete([]byte("key1"))
  thirdRoot := tree.Root

  if thirdRoot != nil {
    t.Error("Root hash should match empty tree after deleting all entries")
  }

  tree = &VectorCommitmentTree{}
  cmptree := &VectorCommitmentTree{}

  addresses := [][]byte{}

  for i := 0; i < 1000; i++ {
    d := make([]byte, 32)
    rand.Read(d)
    addresses = append(addresses, d)
  }

  // Insert 1000 items
  for i := 0; i < 1000; i++ {
    key := addresses[i]
    value := addresses[i]
    err := tree.Insert(key, value)
    if err != nil {
      t.Errorf("Failed to insert item %d: %v", i, err)
    }
  }

  // Insert 1000 items in reverse
  for i := 999; i >= 0; i-- {
    key := addresses[i]
    value := addresses[i]
    err := cmptree.Insert(key, value)
    if err != nil {
      t.Errorf("Failed to insert item %d: %v", i, err)
    }
  }

  // Verify all items
  for i := 0; i < 1000; i++ {
    key := addresses[i]
    expected := addresses[i]
    value, err := tree.Get(key)
    if err != nil {
      t.Errorf("Failed to get item %d: %v", i, err)
    }
    cmpvalue, err := cmptree.Get(key)
    if err != nil {
      t.Errorf("Failed to get item %d: %v", i, err)
    }
    if !bytes.Equal(value, expected) {
      t.Errorf("Item %d: expected %x, got %x", i, string(expected), string(value))
    }
    if !bytes.Equal(value, cmpvalue) {
      t.Errorf("Item %d: expected %x, got %x", i, string(value), string(cmpvalue))
    }
  }

  tcommit := tree.Root.Commit()
  cmptcommit := cmptree.Root.Commit()

  if !bytes.Equal(tcommit, cmptcommit) {
    t.Errorf("tree mismatch, %x, %x", tcommit, cmptcommit)
  }

  proofs := tree.Prove(addresses[500])
  if !tree.Verify(addresses[500], proofs) {
    t.Errorf("proof failed")
  }

  for _, p := range proofs {
    fmt.Printf("%x\n", p)
  }
}
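
Note that these benchmarks build the tree inside the timed loop, so each reported iteration includes an Insert plus the benched operation, and the for i := range b.N form requires Go 1.22 or newer. A variant that separates setup from measurement, timing only Prove, is sketched below (not part of the commit); like the benchmarks above it assumes the bls48581 backend has been initialized, and it runs with the standard go test -bench tooling against the node/crypto package.

package crypto

import (
  "crypto/rand"
  "testing"
)

func BenchmarkVectorCommitmentTreeProveOnly(b *testing.B) {
  tree := &VectorCommitmentTree{}
  keys := make([][]byte, b.N)
  for i := range keys {
    d := make([]byte, 32)
    rand.Read(d)
    keys[i] = d
    if err := tree.Insert(d, d); err != nil {
      b.Fatal(err)
    }
  }
  tree.Commit()
  b.ResetTimer() // exclude tree construction from the measurement
  for i := 0; i < b.N; i++ {
    tree.Prove(keys[i])
  }
}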

node/main.go (506 changed lines)
@@ -4,6 +4,7 @@ package main

import (
"bytes"
"crypto/rand"
_ "embed"
"encoding/binary"
"encoding/hex"
@@ -13,30 +14,23 @@ import (
"io/fs"
"log"
"math/big"
"net/http"
npprof "net/http/pprof"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
rdebug "runtime/debug"
"runtime/pprof"
"slices"
"strconv"
"strings"
"syscall"
"sync"
"sync/atomic"
"time"

"github.com/cloudflare/circl/sign/ed448"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pbnjay/memory"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/app"
"source.quilibrium.com/quilibrium/monorepo/node/config"
@@ -44,10 +38,8 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
qruntime "source.quilibrium.com/quilibrium/monorepo/node/internal/runtime"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/rpc"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
@@ -168,76 +160,6 @@ func signatureCheckDefault() bool {
func main() {
flag.Parse()

if *signatureCheck {
if runtime.GOOS == "windows" {
fmt.Println("Signature check not available for windows yet, skipping...")
} else {
ex, err := os.Executable()
if err != nil {
panic(err)
}

b, err := os.ReadFile(ex)
if err != nil {
fmt.Println(
"Error encountered during signature check – are you running this " +
"from source? (use --signature-check=false)",
)
panic(err)
}

checksum := sha3.Sum256(b)
digest, err := os.ReadFile(ex + ".dgst")
if err != nil {
fmt.Println("Digest file not found")
os.Exit(1)
}

parts := strings.Split(string(digest), " ")
if len(parts) != 2 {
fmt.Println("Invalid digest file format")
os.Exit(1)
}

digestBytes, err := hex.DecodeString(parts[1][:64])
if err != nil {
fmt.Println("Invalid digest file format")
os.Exit(1)
}

if !bytes.Equal(checksum[:], digestBytes) {
fmt.Println("Invalid digest for node")
os.Exit(1)
}

count := 0

for i := 1; i <= len(config.Signatories); i++ {
signatureFile := fmt.Sprintf(ex+".dgst.sig.%d", i)
sig, err := os.ReadFile(signatureFile)
if err != nil {
continue
}

pubkey, _ := hex.DecodeString(config.Signatories[i-1])
if !ed448.Verify(pubkey, digest, sig, "") {
fmt.Printf("Failed signature check for signatory #%d\n", i)
os.Exit(1)
}
count++
}

if count < ((len(config.Signatories)-4)/2)+((len(config.Signatories)-4)%2) {
fmt.Printf("Quorum on signatures not met")
os.Exit(1)
}

fmt.Println("Signature check passed")
}
} else {
fmt.Println("Signature check disabled, skipping...")
}
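
The quorum threshold count < ((len(config.Signatories)-4)/2)+((len(config.Signatories)-4)%2) is integer arithmetic for "at least half of the signatories, excluding four, rounded up". A standalone sketch of just that arithmetic (the signatory counts are illustrative, not the real list size; not part of the commit):

package main

import "fmt"

// quorum mirrors the threshold in the signature check: ceil((n-4)/2).
func quorum(n int) int {
  return (n-4)/2 + (n-4)%2
}

func main() {
  for _, n := range []int{17, 18} {
    fmt.Printf("%d signatories -> %d valid signatures required\n", n, quorum(n))
  }
}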

if *memprofile != "" && *core == 0 {
go func() {
for {
@@ -262,291 +184,177 @@ func main() {
defer pprof.StopCPUProfile()
}

if *pprofServer != "" && *core == 0 {
go func() {
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", npprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", npprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", npprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", npprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", npprof.Trace)
log.Fatal(http.ListenAndServe(*pprofServer, mux))
}()
}

if *prometheusServer != "" && *core == 0 {
go func() {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(*prometheusServer, mux))
}()
}

if *balance {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
}

printBalance(config)

return
}

if *peerId {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
}

printPeerID(config.P2P)
return
}

if *importPrivKey != "" {
config, err := config.LoadConfig(*configDirectory, *importPrivKey, false)
if err != nil {
panic(err)
}

printPeerID(config.P2P)
fmt.Println("Import completed, you are ready for the launch.")
return
}

if *nodeInfo {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
}

printNodeInfo(config)
return
}

if !*dbConsole && *core == 0 {
config.PrintLogo()
config.PrintVersion(uint8(*network))
fmt.Println(" ")
}

nodeConfig, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
}

if *compactDB && *core == 0 {
db := store.NewPebbleDB(nodeConfig.DB)
if err := db.CompactAll(); err != nil {
panic(err)
}
if err := db.Close(); err != nil {
panic(err)
}
return
}

if *network != 0 {
if nodeConfig.P2P.BootstrapPeers[0] == config.BootstrapPeers[0] {
fmt.Println(
"Node has specified to run outside of mainnet but is still " +
"using default bootstrap list. This will fail. Exiting.",
)
os.Exit(1)
}

nodeConfig.Engine.GenesisSeed = fmt.Sprintf(
"%02x%s",
byte(*network),
nodeConfig.Engine.GenesisSeed,
)
nodeConfig.P2P.Network = uint8(*network)
fmt.Println(
"Node is operating outside of mainnet – be sure you intended to do this.",
)
}

// If it's not explicitly set to true, we should defer to flags
if !nodeConfig.Engine.FullProver {
nodeConfig.Engine.FullProver = !*lightProver
}

clearIfTestData(*configDirectory, nodeConfig)

if *dbConsole {
console, err := app.NewDBConsole(nodeConfig)
if err != nil {
panic(err)
}

console.Run()
return
}

if *dhtOnly {
done := make(chan os.Signal, 1)
signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
dht, err := app.NewDHTNode(nodeConfig)
if err != nil {
panic(err)
}

go func() {
dht.Start()
}()

<-done
dht.Stop()
return
}

if len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
maxProcs, numCPU := runtime.GOMAXPROCS(0), runtime.NumCPU()
if maxProcs > numCPU && !nodeConfig.Engine.AllowExcessiveGOMAXPROCS {
fmt.Println("GOMAXPROCS is set higher than the number of available CPUs.")
os.Exit(1)
}

nodeConfig.Engine.DataWorkerCount = qruntime.WorkerCount(
nodeConfig.Engine.DataWorkerCount, true,
)
}

if *core != 0 {
rdebug.SetMemoryLimit(nodeConfig.Engine.DataWorkerMemoryLimit)

if *parentProcess == 0 && len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
panic("parent process pid not specified")
}

l, err := zap.NewProduction()
if err != nil {
panic(err)
}

rpcMultiaddr := fmt.Sprintf(
nodeConfig.Engine.DataWorkerBaseListenMultiaddr,
int(nodeConfig.Engine.DataWorkerBaseListenPort)+*core-1,
)

if len(nodeConfig.Engine.DataWorkerMultiaddrs) != 0 {
rpcMultiaddr = nodeConfig.Engine.DataWorkerMultiaddrs[*core-1]
}

srv, err := rpc.NewDataWorkerIPCServer(
rpcMultiaddr,
l,
uint32(*core)-1,
qcrypto.NewWesolowskiFrameProver(l),
nodeConfig,
*parentProcess,
)
if err != nil {
panic(err)
}

err = srv.Start()
if err != nil {
panic(err)
}
return
} else {
totalMemory := int64(memory.TotalMemory())
dataWorkerReservedMemory := int64(0)
if len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
dataWorkerReservedMemory = nodeConfig.Engine.DataWorkerMemoryLimit * int64(nodeConfig.Engine.DataWorkerCount)
}
switch availableOverhead := totalMemory - dataWorkerReservedMemory; {
case totalMemory < dataWorkerReservedMemory:
fmt.Println("The memory allocated to data workers exceeds the total system memory.")
fmt.Println("You are at risk of running out of memory during runtime.")
case availableOverhead < 8*1024*1024*1024:
fmt.Println("The memory available to the node, unallocated to the data workers, is less than 8GiB.")
fmt.Println("You are at risk of running out of memory during runtime.")
default:
if _, explicitGOMEMLIMIT := os.LookupEnv("GOMEMLIMIT"); !explicitGOMEMLIMIT {
rdebug.SetMemoryLimit(availableOverhead * 8 / 10)
}
if _, explicitGOGC := os.LookupEnv("GOGC"); !explicitGOGC {
rdebug.SetGCPercent(10)
}
}
}

fmt.Println("Loading ceremony state and starting node...")

if !*integrityCheck {
go spawnDataWorkers(nodeConfig)
defer stopDataWorkers()
}
maxProcs := runtime.GOMAXPROCS(0)

kzg.Init()

report := RunSelfTestIfNeeded(*configDirectory, nodeConfig)
fmt.Println("Max Cores:", maxProcs)
fmt.Println("Performing proof tree tests...")

if *core == 0 {
for {
genesis, err := config.DownloadAndVerifyGenesis(uint(nodeConfig.P2P.Network))
if err != nil {
time.Sleep(10 * time.Minute)
continue
fmt.Println("\nTree Insertion")
sets := []int{1000, 10000, 100000, 1000000, 10000000, 100000000}
for _, set := range sets {
for i := 1; i <= maxProcs; i *= 2 {
fmt.Println("Total Parallelism:", i)
var total atomic.Int64
wg := sync.WaitGroup{}
wg.Add(i)
for j := 0; j < i; j++ {
go func() {
defer wg.Done()
vecTree := &qcrypto.VectorCommitmentTree{}
for k := 0; k < set; k++ {
d := make([]byte, 32)
rand.Read(d)
start := time.Now()
err := vecTree.Insert(d, d)
total.Add(int64(time.Since(start)))
if err != nil {
panic(err)
}
}
}()
}

nodeConfig.Engine.GenesisSeed = genesis.GenesisSeedHex
break
wg.Wait()
fmt.Println("Size: ", set, "Op Speed: ", time.Duration(total.Load())/time.Duration(set)/time.Duration(i))
}
}

RunForkRepairIfNeeded(nodeConfig)

done := make(chan os.Signal, 1)
signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
var node *app.Node
if *debug {
node, err = app.NewDebugNode(nodeConfig, report)
} else {
node, err = app.NewNode(nodeConfig, report)
}

if err != nil {
panic(err)
}

if *integrityCheck {
fmt.Println("Running integrity check...")
node.VerifyProofIntegrity()
fmt.Println("Integrity check passed!")
return
}

// runtime.GOMAXPROCS(1)

node.Start()
defer node.Stop()

if nodeConfig.ListenGRPCMultiaddr != "" {
srv, err := rpc.NewRPCServer(
nodeConfig.ListenGRPCMultiaddr,
nodeConfig.ListenRestMultiaddr,
node.GetLogger(),
node.GetDataProofStore(),
node.GetClockStore(),
node.GetCoinStore(),
node.GetKeyManager(),
node.GetPubSub(),
node.GetMasterClock(),
node.GetExecutionEngines(),
)
if err != nil {
panic(err)
fmt.Println("\nTree Deletion")
for _, set := range sets {
for i := 1; i <= maxProcs; i *= 2 {
fmt.Println("Total Parallelism:", i)
var total atomic.Int64
wg := sync.WaitGroup{}
wg.Add(i)
for j := 0; j < i; j++ {
go func() {
defer wg.Done()
vecTree := &qcrypto.VectorCommitmentTree{}
data := make([][]byte, set)
for k := 0; k < set; k++ {
d := make([]byte, 32)
rand.Read(d)
data[k] = d
err := vecTree.Insert(d, d)
if err != nil {
panic(err)
}
}
for k := 0; k < set; k++ {
start := time.Now()
err := vecTree.Delete(data[k])
total.Add(int64(time.Since(start)))
if err != nil {
panic(err)
}
}
}()
}
wg.Wait()
fmt.Println("Size: ", set, "Op Speed: ", time.Duration(total.Load())/time.Duration(set)/time.Duration(i))
}
if err := srv.Start(); err != nil {
panic(err)
}
defer srv.Stop()
}

<-done
fmt.Println("\nTree Commit")
for _, set := range sets {
for i := 1; i <= maxProcs; i *= 2 {
fmt.Println("Total Parallelism:", i)
var total atomic.Int64
wg := sync.WaitGroup{}
wg.Add(i)
for j := 0; j < i; j++ {
go func() {
defer wg.Done()
vecTree := &qcrypto.VectorCommitmentTree{}
data := make([][]byte, set)
for k := 0; k < set; k++ {
d := make([]byte, 32)
rand.Read(d)
data[k] = d
err := vecTree.Insert(d, d)
if err != nil {
panic(err)
}
}

start := time.Now()
vecTree.Commit()
total.Add(int64(time.Since(start)))
}()
}
wg.Wait()
fmt.Println("Size: ", set, "Op Speed: ", time.Duration(total.Load())/time.Duration(i))
}
}

fmt.Println("\nTree Proof")
for _, set := range sets {
for i := 1; i <= maxProcs; i *= 2 {
fmt.Println("Total Parallelism:", i)
var total atomic.Int64
wg := sync.WaitGroup{}
wg.Add(i)
for j := 0; j < i; j++ {
go func() {
defer wg.Done()
vecTree := &qcrypto.VectorCommitmentTree{}
data := make([][]byte, set)
for k := 0; k < set; k++ {
d := make([]byte, 32)
rand.Read(d)
data[k] = d
err := vecTree.Insert(d, d)
if err != nil {
panic(err)
}
}
vecTree.Commit()
for k := 0; k < set; k++ {
start := time.Now()
vecTree.Prove(data[k])
total.Add(int64(time.Since(start)))
}
}()
}
wg.Wait()
fmt.Println("Size: ", set, "Op Speed: ", time.Duration(total.Load())/time.Duration(set)/time.Duration(i))
}
}

fmt.Println("\nVDF Prove")
log, _ := zap.NewProduction()
prover := qcrypto.NewWesolowskiFrameProver(log)
sets = []int{100000, 200000, 500000, 1000000, 2000000, 5000000}
for _, set := range sets {
for i := 1; i <= maxProcs; i *= 2 {
fmt.Println("Total Parallelism:", i)
var total atomic.Int64
wg := sync.WaitGroup{}
wg.Add(i)
for j := 0; j < i; j++ {
go func() {
defer wg.Done()
data := make([]byte, 516)
rand.Read(data)
start := time.Now()
_, err := prover.CalculateChallengeProof(data, uint32(set))
total.Add(int64(time.Since(start)))
if err != nil {
panic(err)
}
}()
}
wg.Wait()
fmt.Println("Size: ", set, "Op Speed: ", time.Duration(total.Load())/time.Duration(i))
}
}
}
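
Each grinder pass above runs the same measurement pattern: every goroutine accumulates per-operation wall-clock time into a shared atomic counter, and the reported figure is the total divided by the set size and the parallelism (for the Commit and VDF passes only by the parallelism, since each goroutine times a single operation). A condensed standalone sketch of that pattern with a stand-in workload (not part of the commit):

package main

import (
  "fmt"
  "sync"
  "sync/atomic"
  "time"
)

// work stands in for tree.Insert, Delete, Prove, or a VDF proof.
func work() { time.Sleep(10 * time.Microsecond) }

func main() {
  const parallelism = 4
  const setSize = 1000
  var total atomic.Int64
  wg := sync.WaitGroup{}
  wg.Add(parallelism)
  for j := 0; j < parallelism; j++ {
    go func() {
      defer wg.Done()
      for k := 0; k < setSize; k++ {
        start := time.Now()
        work()
        total.Add(int64(time.Since(start)))
      }
    }()
  }
  wg.Wait()
  perOp := time.Duration(total.Load()) / time.Duration(setSize) / time.Duration(parallelism)
  fmt.Println("Op Speed:", perOp)
}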

var dataWorkers []*exec.Cmd