ceremonyclient/node/store/hypergraph_test.go
Cassandra Heart 1b2660b7df
v2.1.0.20 (#516)
* .20 testing

* Read in the debug by env variable (#514)

* v2.1.0.19

* enhanced error logging, fix seniority marker join blocker, fix sync message size limit defaults

* resolve signature failure

* additional error logging for merge-related signatures

* fix: one-shot sync message size, app shard TC signature size, collector/hotstuff race condition, expired joins blocking new joins due to pruning disable

* remove compat with old 2.0.0 blossomsub

* fix: resolve abandoned prover joins

* reload prover registry

* fix stale worker proposal edge

* add full sanity check on join before submitting to identify bug

* resolve non-fallthrough condition that should be fallthrough

* fix: resolve rare SIGFPE, fix orphan expired joins blocking workers from reallocating

* add reconnect fallback if no peers are found with variable reconnect time (#511)

Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>

* update base peer count to 1 (#513)

* fix: expired prover join frames, starting port ranges, proposer getting stuck, and seniority on joins

* fix: panic on shutdown, libp2p discovery picking inaccessible peers, coverage event check not in shutdown logic, amend app shard worker behavior to mirror global for prover root reconciliation

* fix: shutdown scenario quirks, reload hanging

* fix: do not bailout early on shutdown of coverage check

* fix: force registry refresh on worker waiting for registration

* add more logging to wait for prover

* fix: worker manager refreshes the filter on allocation, snapshots blocking close on shutdown

* tweak: force shutdown after five seconds for app worker

* fix: don't loop when shutting down

* fix: slight reordering, also added named workers to trace hanging shutdowns

* use deterministic key for peer id of workers to stop flagging workers as sybil attacks

* fix: remove pubsub stop from app consensus engine as it shouldn't manage pubsub lifecycle, integrate shutdown context to PerformSync to prevent stuck syncs from halting respawn

* fix: blossomsub pubsub interface does not properly track subscription status

* fix: subscribe order to avoid nil panic

* switch from dnsaddr to dns4

* add missing quic-v1

* additional logging to isolate respawn quirks

* fix: dnsaddr -> dns4 for blossomsub

* allow debug env var to be read

---------

Co-authored-by: Cassandra Heart <cassandra@quilibrium.com>
Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

* fix newPebbleDB constructor config param (#517)

* fix: high CPU overhead in initial worker behaviors/ongoing sync

* faster docker builds with better caching

* qol: add extra data to node info, and query metrics from command line

* leave proposals for overcrowded shards

* hub-and-spoke global message broadcasts

* small tweaks to cli output for join frames

---------

Co-authored-by: winged-pegasus <55340199+winged-pegasus@users.noreply.github.com>
Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
Co-authored-by: Black Swan <3999712+blacks1ne@users.noreply.github.com>
2026-03-04 01:37:04 -06:00

126 lines
4.0 KiB
Go

package store
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/config"
"source.quilibrium.com/quilibrium/monorepo/types/tries"
)
// setupTestHypergraphStore builds a PebbleHypergraphStore backed by a
// throwaway in-memory Pebble database. The database is closed
// automatically when the test (and its subtests) finish, via t.Cleanup.
func setupTestHypergraphStore(t *testing.T) *PebbleHypergraphStore {
	log := zap.NewNop()
	dbCfg := &config.DBConfig{
		InMemoryDONOTUSE: true,
		Path:             ".test/hypergraph",
	}
	cfg := &config.Config{DB: dbCfg}

	pebble := NewPebbleDB(log, cfg, 0)
	require.NotNil(t, pebble)
	t.Cleanup(func() { pebble.Close() })

	return NewPebbleHypergraphStore(dbCfg, pebble, log, nil, nil)
}
// TestGetRootCommits_IncludesAllCommitTypes writes one commit for every
// (phase, kind) combination on a single shard and verifies GetRootCommits
// returns all four, in the expected slot order:
// vertex adds, vertex removes, hyperedge adds, hyperedge removes.
func TestGetRootCommits_IncludesAllCommitTypes(t *testing.T) {
	hgStore := setupTestHypergraphStore(t)

	shardAddress := bytes.Repeat([]byte{0x42}, 32)
	frameNumber := uint64(100)

	// One distinct 64-byte commit value per slot so mismatched ordering
	// is detectable.
	vertexAddsCommit := bytes.Repeat([]byte{0xAA}, 64)
	vertexRemovesCommit := bytes.Repeat([]byte{0xBB}, 64)
	hyperedgeAddsCommit := bytes.Repeat([]byte{0xCC}, 64)
	hyperedgeRemovesCommit := bytes.Repeat([]byte{0xDD}, 64)

	// Persist all four commit types in a single transaction.
	txn, err := hgStore.NewTransaction(false)
	require.NoError(t, err)
	for _, tc := range []struct {
		phase  string
		kind   string
		commit []byte
	}{
		{"adds", "vertex", vertexAddsCommit},
		{"removes", "vertex", vertexRemovesCommit},
		{"adds", "hyperedge", hyperedgeAddsCommit},
		{"removes", "hyperedge", hyperedgeRemovesCommit},
	} {
		require.NoError(
			t,
			hgStore.SetShardCommit(txn, frameNumber, tc.phase, tc.kind, shardAddress, tc.commit),
		)
	}
	require.NoError(t, txn.Commit())

	commits, err := hgStore.GetRootCommits(frameNumber)
	require.NoError(t, err)

	// Locate the shard key whose L2 component matches our address.
	var foundShardKey *tries.ShardKey
	for sk := range commits {
		if bytes.Equal(sk.L2[:], shardAddress) {
			foundShardKey = &sk
			break
		}
	}
	require.NotNil(t, foundShardKey, "Should find the shard in commits")

	shardCommits := commits[*foundShardKey]
	require.Len(t, shardCommits, 4, "Should have 4 commit slots")

	// Each slot must hold the commit written for that (phase, kind) pair.
	assert.Equal(t, vertexAddsCommit, shardCommits[0], "Vertex adds commit should match")
	assert.Equal(t, vertexRemovesCommit, shardCommits[1], "Vertex removes commit should match")
	assert.Equal(t, hyperedgeAddsCommit, shardCommits[2], "Hyperedge adds commit should match")
	assert.Equal(t, hyperedgeRemovesCommit, shardCommits[3], "Hyperedge removes commit should match")
}
// TestGetRootCommits_HyperedgeRemovesOnly is a regression guard: a shard
// whose sole commit is of the "hyperedge removes" type must still appear
// in the result of GetRootCommits (i.e. the underlying range scan must
// cover that key prefix).
func TestGetRootCommits_HyperedgeRemovesOnly(t *testing.T) {
	hgStore := setupTestHypergraphStore(t)

	shardAddress := bytes.Repeat([]byte{0x99}, 32)
	frameNumber := uint64(200)
	hyperedgeRemovesCommit := bytes.Repeat([]byte{0xEE}, 64)

	// Write exactly one commit: hyperedge removes, nothing else.
	txn, err := hgStore.NewTransaction(false)
	require.NoError(t, err)
	require.NoError(
		t,
		hgStore.SetShardCommit(txn, frameNumber, "removes", "hyperedge", shardAddress, hyperedgeRemovesCommit),
	)
	require.NoError(t, txn.Commit())

	commits, err := hgStore.GetRootCommits(frameNumber)
	require.NoError(t, err)

	// Search the result set for our shard's key.
	var foundShardKey *tries.ShardKey
	for sk := range commits {
		if bytes.Equal(sk.L2[:], shardAddress) {
			foundShardKey = &sk
			break
		}
	}
	// Fails if hyperedge removes are excluded from the range scan.
	require.NotNil(t, foundShardKey, "Should find the shard with only hyperedge removes in commits")

	shardCommits := commits[*foundShardKey]
	require.Len(t, shardCommits, 4, "Should have 4 commit slots")

	// Hyperedge removes occupy the final slot (index 3).
	assert.Equal(t, hyperedgeRemovesCommit, shardCommits[3], "Hyperedge removes commit should match")
}