ceremonyclient/node/store/pebble_test.go
Cassandra Heart 1b2660b7df
v2.1.0.20 (#516)
* .20 testing

* Read in the debug by env variable (#514)

* v2.1.0.19

* enhanced error logging, fix seniority marker join blocker, fix sync message size limit defaults

* resolve signature failure

* additional error logging for merge-related signatures

* fix: one-shot sync message size, app shard TC signature size, collector/hotstuff race condition, expired joins blocking new joins due to pruning disable

* remove compat with old 2.0.0 blossomsub

* fix: resolve abandoned prover joins

* reload prover registry

* fix stale worker proposal edge

* add full sanity check on join before submitting to identify bug

* resolve non-fallthrough condition that should be fallthrough

* fix: resolve rare SIGFPE, fix orphan expired joins blocking workers from reallocating

* add reconnect fallback if no peers are found with variable reconnect time (#511)

Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>

* update base peer count to 1 (#513)

* fix: expired prover join frames, starting port ranges, proposer getting stuck, and seniority on joins

* fix: panic on shutdown, libp2p discovery picking inaccessible peers, coverage event check not in shutdown logic, amend app shard worker behavior to mirror global for prover root reconciliation

* fix: shutdown scenario quirks, reload hanging

* fix: do not bailout early on shutdown of coverage check

* fix: force registry refresh on worker waiting for registration

* add more logging to wait for prover

* fix: worker manager refreshes the filter on allocation, snapshots blocking close on shutdown

* tweak: force shutdown after five seconds for app worker

* fix: don't loop when shutting down

* fix: slight reordering, also added named workers to trace hanging shutdowns

* use deterministic key for peer id of workers to stop flagging workers as sybil attacks

* fix: remove pubsub stop from app consensus engine as it shouldn't manage pubsub lifecycle, integrate shutdown context to PerformSync to prevent stuck syncs from halting respawn

* fix: blossomsub pubsub interface does not properly track subscription status

* fix: subscribe order to avoid nil panic

* switch from dnsaddr to dns4

* add missing quic-v1

* additional logging to isolate respawn quirks

* fix: dnsaddr -> dns4 for blossomsub

* allow debug env var to be read

---------

Co-authored-by: Cassandra Heart <cassandra@quilibrium.com>
Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

* fix newPebbleDB constructor config param (#517)

* fix: high CPU overhead in initial worker behaviors/ongoing sync

* faster docker builds with better caching

* qol: add extra data to node info, and query metrics from command line

* leave proposals for overcrowded shards

* hub-and-spoke global message broadcasts

* small tweaks to cli output for join frames

---------

Co-authored-by: winged-pegasus <55340199+winged-pegasus@users.noreply.github.com>
Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
Co-authored-by: Black Swan <3999712+blacks1ne@users.noreply.github.com>
2026-03-04 01:37:04 -06:00

178 lines
4.5 KiB
Go

package store
import (
"encoding/hex"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest/observer"
"source.quilibrium.com/quilibrium/monorepo/config"
)
// TestPoseidon verifies that the Poseidon hash of a fixed test vector
// computes without error and fills exactly 32 bytes. The digest is printed
// for manual inspection.
func TestPoseidon(t *testing.T) {
	bi, err := poseidon.HashBytes([]byte("testvector"))
	require.NoError(t, err)
	digest := bi.FillBytes(make([]byte, 32))
	// Printed (rather than logged) so it is visible even without -v;
	// useful when regenerating test vectors by hand.
	fmt.Println(hex.EncodeToString(digest))
	// Previously this test ended with assert.FailNow(t, ""), a debug
	// leftover that made it fail unconditionally. Assert the digest
	// width instead.
	assert.Len(t, digest, 32)
}
func TestNewPebbleDB_ExistingDirectory(t *testing.T) {
testDir, err := os.MkdirTemp("", "pebble-test-existing-*")
require.NoError(t, err)
defer os.RemoveAll(testDir)
core, logs := observer.New(zap.InfoLevel)
testLogger := zap.New(core)
cfg := &config.Config{DB: &config.DBConfig{
Path: testDir,
}}
db := NewPebbleDB(testLogger, cfg, 0)
require.NotNil(t, db)
defer db.Close()
foundInfoLog := false
for _, log := range logs.All() {
if log.Message == "store found" {
foundInfoLog = true
assert.Equal(t, testDir, log.ContextMap()["path"])
break
}
}
assert.True(t, foundInfoLog, "Expected 'store found' info log")
}
func TestNewPebbleDB_ExistingDirectoryWorker(t *testing.T) {
testDir, err := os.MkdirTemp("", "pebble-test-existing-worker-*")
require.NoError(t, err)
defer os.RemoveAll(testDir)
core, logs := observer.New(zap.InfoLevel)
testLogger := zap.New(core)
cfg := &config.Config{DB: &config.DBConfig{
WorkerPaths: []string{testDir},
}}
db := NewPebbleDB(testLogger, cfg, 1)
require.NotNil(t, db)
defer db.Close()
foundInfoLog := false
for _, log := range logs.All() {
if log.Message == "worker store found" {
foundInfoLog = true
assert.Equal(t, testDir, log.ContextMap()["path"])
assert.Equal(t, uint64(1), log.ContextMap()["core_id"])
break
}
}
assert.True(t, foundInfoLog, "Expected 'worker store found' info log")
}
func TestNewPebbleDB_NonExistingDirectory(t *testing.T) {
baseDir, err := os.MkdirTemp("", "pebble-test-nonexisting-*")
require.NoError(t, err)
defer os.RemoveAll(baseDir)
testDir := filepath.Join(baseDir, "nonexisting")
core, logs := observer.New(zap.WarnLevel)
testLogger := zap.New(core)
cfg := &config.Config{DB: &config.DBConfig{
Path: testDir,
}}
db := NewPebbleDB(testLogger, cfg, 0)
require.NotNil(t, db)
defer db.Close()
_, err = os.Stat(testDir)
assert.NoError(t, err, "Directory should have been created")
foundWarnLog := false
for _, log := range logs.All() {
if log.Message == "store not found, creating" {
foundWarnLog = true
assert.Equal(t, testDir, log.ContextMap()["path"])
break
}
}
assert.True(t, foundWarnLog, "Expected 'store not found, creating' warning log")
}
func TestNewPebbleDB_NonExistingDirectoryWorker(t *testing.T) {
baseDir, err := os.MkdirTemp("", "pebble-test-nonexisting-worker-*")
require.NoError(t, err)
defer os.RemoveAll(baseDir)
testDir := filepath.Join(baseDir, "nonexisting-worker")
core, logs := observer.New(zap.WarnLevel)
testLogger := zap.New(core)
cfg := &config.Config{DB: &config.DBConfig{
WorkerPaths: []string{testDir},
}}
db := NewPebbleDB(testLogger, cfg, 1)
require.NotNil(t, db)
defer db.Close()
_, err = os.Stat(testDir)
assert.NoError(t, err, "Directory should have been created")
foundWarnLog := false
for _, log := range logs.All() {
if log.Message == "worker store not found, creating" {
foundWarnLog = true
assert.Equal(t, testDir, log.ContextMap()["path"])
assert.Equal(t, uint64(1), log.ContextMap()["core_id"])
break
}
}
assert.True(t, foundWarnLog, "Expected 'worker store not found, creating' warning log")
}
func TestNewPebbleDB_WorkerPathPrefix(t *testing.T) {
baseDir, err := os.MkdirTemp("", "pebble-test-prefix-*")
require.NoError(t, err)
defer os.RemoveAll(baseDir)
core, logs := observer.New(zap.WarnLevel)
testLogger := zap.New(core)
pathFormat := filepath.Join(baseDir, "worker-%d")
cfg := &config.Config{DB: &config.DBConfig{
WorkerPathPrefix: pathFormat,
}}
db := NewPebbleDB(testLogger, cfg, 2)
require.NotNil(t, db)
defer db.Close()
expectedPath := filepath.Join(baseDir, "worker-2")
_, err = os.Stat(expectedPath)
assert.NoError(t, err, "Directory should have been created")
foundWarnLog := false
for _, log := range logs.All() {
if log.Message == "worker store not found, creating" {
foundWarnLog = true
assert.Equal(t, expectedPath, log.ContextMap()["path"])
assert.Equal(t, uint64(2), log.ContextMap()["core_id"])
break
}
}
assert.True(t, foundWarnLog, "Expected 'worker store not found, creating' warning log")
}