package vdf_test

import (
	"bytes"
	"fmt"
	"sync"
	"testing"
	"time"

	"golang.org/x/crypto/sha3"

	"source.quilibrium.com/quilibrium/monorepo/vdf"
)

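// getChallenge derives a deterministic 32-byte challenge from a seed string
// via SHA3-256, so each test runs against a distinct, reproducible input.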
func getChallenge(seed string) [32]byte {
	return sha3.Sum256([]byte(seed))
}

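// TestProveVerify checks the single-prover happy path: a solution produced
// by WesolowskiSolve at a given difficulty verifies with WesolowskiVerify.
// The elapsed verification time is printed for rough benchmarking.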
func TestProveVerify(t *testing.T) {
	difficulty := uint32(160000)
	challenge := getChallenge("TestProveVerify")
	solution := vdf.WesolowskiSolve(challenge, difficulty)

	now := time.Now()
	isOk := vdf.WesolowskiVerify(challenge, difficulty, solution)
	fmt.Printf("%v\n", time.Since(now))
	if !isOk {
		t.Fatalf("Verification failed")
	}
}

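// TestProveVerifyMulti_Succeeds solves the multi-prover VDF concurrently,
// once per participant ID, and checks that the combined set of 516-byte
// proof blobs verifies, first for a 3-ID set and then for a 7-ID set.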
func TestProveVerifyMulti_Succeeds(t *testing.T) {
	difficulty := uint32(160000)
	challenge := getChallenge("TestProveVerifyMulti_Succeeds")

	ids := [][]byte{
		[]byte("worker-A"),
		[]byte("worker-B"),
		[]byte("worker-C"),
	}

	blobs := make([][516]byte, len(ids))
	wg := sync.WaitGroup{}
	for i := range ids {
		wg.Add(1)
		// Pass i explicitly so each goroutine solves for its own index,
		// regardless of the Go version's loop-variable scoping rules.
		go func(i int) {
			defer wg.Done()
			blobs[i] = vdf.WesolowskiSolveMulti(challenge, difficulty, ids, uint32(i))
		}(i)
	}
	wg.Wait()

	now := time.Now()
	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, ids, blobs); !ok {
		t.Fatalf("Multi verification failed")
	}
	fmt.Printf("%v\n", time.Since(now))
	wg = sync.WaitGroup{}

	ids = [][]byte{
		[]byte("worker-A"),
		[]byte("worker-B"),
		[]byte("worker-C"),
		[]byte("worker-D"),
		[]byte("worker-E"),
		[]byte("worker-F"),
		[]byte("worker-G"),
	}

	blobs = make([][516]byte, len(ids))
	for i := range ids {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			blobs[i] = vdf.WesolowskiSolveMulti(challenge, difficulty, ids, uint32(i))
		}(i)
	}
	wg.Wait()

	now = time.Now()
	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, ids, blobs); !ok {
		t.Fatalf("Multi verification failed")
	}
	fmt.Printf("%v\n", time.Since(now))
}

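// TestProveVerifyMulti_OrderInsensitive checks that verification does not
// depend on the order of the (ID, blob) pairs: applying the same
// permutation to both slices must still verify.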
func TestProveVerifyMulti_OrderInsensitive(t *testing.T) {
	difficulty := uint32(50000)
	challenge := getChallenge("TestProveVerifyMulti_OrderInsensitive")

	ids := [][]byte{
		[]byte("alice"),
		[]byte("bob"),
		[]byte("carol"),
		[]byte("dave"),
	}

	blobs := make([][516]byte, len(ids))
	for i := range ids {
		blobs[i] = vdf.WesolowskiSolveMulti(challenge, difficulty, ids, uint32(i))
	}

	// Apply the same permutation to ids and blobs, preserving their pairing.
	permIdx := []int{2, 0, 3, 1}
	idsPerm := make([][]byte, len(ids))
	blobsPerm := make([][516]byte, len(ids))
	for i, j := range permIdx {
		idsPerm[i] = ids[j]
		blobsPerm[i] = blobs[j]
	}

	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, idsPerm, blobsPerm); !ok {
		t.Fatalf("Multi verification failed under permutation")
	}
}

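// TestProveVerifyMulti_TamperFails flips a single bit in one participant's
// proof blob and expects the combined verification to reject the set.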
func TestProveVerifyMulti_TamperFails(t *testing.T) {
	difficulty := uint32(30000)
	challenge := getChallenge("TestProveVerifyMulti_TamperFails")

	ids := [][]byte{[]byte("w1"), []byte("w2")}
	blobs := make([][516]byte, len(ids))
	for i := range ids {
		blobs[i] = vdf.WesolowskiSolveMulti(challenge, difficulty, ids, uint32(i))
	}

	// tampered shares blobs' backing array; the aliasing is harmless here
	// because blobs is not reused, and a single flipped bit must suffice.
	tampered := blobs
	tampered[1][100] ^= 0x01

	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, ids, tampered); ok {
		t.Fatalf("Expected tampered multi verification to fail")
	}
}

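// TestProveVerifyMulti_MissingOrWrongIDsFail probes how verification binds
// to the full ID set: subsets, altered IDs, and extra IDs must all fail,
// while the original set, even with shuffled IDs, still verifies.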
func TestProveVerifyMulti_MissingOrWrongIDsFail(t *testing.T) {
	difficulty := uint32(30000)
	challenge := getChallenge("TestProveVerifyMulti_MissingOrWrongIDsFail")

	ids := [][]byte{[]byte("w1"), []byte("w2"), []byte("w3")}
	blobs := make([][516]byte, len(ids))
	for i := range ids {
		blobs[i] = vdf.WesolowskiSolveMulti(challenge, difficulty, ids, uint32(i))
	}

	idsSubset := ids[:2]
	blobsSubset := blobs[:2]
	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, idsSubset, blobsSubset); ok {
		t.Fatalf("Expected subset verification to fail (b and S bound to full ID set)")
	}

	idsWrong := make([][]byte, len(ids))
	copy(idsWrong, ids)
	idsWrong[1] = []byte("w2-CHANGED")
	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, idsWrong, blobs); ok {
		t.Fatalf("Expected verification to fail with mismatched IDs")
	}

	// The literal's cap equals its len, so append allocates a new backing
	// array and leaves ids intact for the final check below.
	idsExtra := append(ids, []byte("w4"))
	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, idsExtra, blobs); ok {
		t.Fatalf("Expected verification to fail on mismatched lengths")
	}

	// Shuffling the IDs while leaving the blobs in their original order
	// should still verify: the verifier reorders the pairs internally,
	// so position does not matter.
	idsShuffled := [][]byte{ids[2], ids[0], ids[1]}
	blobsOriginalOrder := [][516]byte{blobs[0], blobs[1], blobs[2]}

	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, idsShuffled, blobsOriginalOrder); !ok {
		t.Fatalf("Expected shuffled-ID verification to succeed despite mismatched ordering")
	}

	if ok := vdf.WesolowskiVerifyMulti(challenge, difficulty, ids, blobs); !ok {
		t.Fatalf("Original multi verification should pass")
	}
}

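// TestProveVerifyMulti_DifferentChallengesFail checks that blobs are bound
// to the challenge they were solved for: proofs for challengeA must not
// verify against challengeB.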
func TestProveVerifyMulti_DifferentChallengesFail(t *testing.T) {
	difficulty := uint32(30000)
	challengeA := getChallenge("A")
	challengeB := getChallenge("B")

	ids := [][]byte{[]byte("wa"), []byte("wb")}
	blobs := make([][516]byte, len(ids))
	for i := range ids {
		blobs[i] = vdf.WesolowskiSolveMulti(challengeA, difficulty, ids, uint32(i))
	}

	// Verify against a different challenge: should fail.
	if ok := vdf.WesolowskiVerifyMulti(challengeB, difficulty, ids, blobs); ok {
		t.Fatalf("Expected verification to fail for different challenge")
	}
}

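// TestProveVerifyMulti_Determinism checks that solving with identical
// inputs (challenge, difficulty, IDs, index) yields byte-identical blobs,
// i.e. WesolowskiSolveMulti is deterministic.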
func TestProveVerifyMulti_Determinism(t *testing.T) {
	difficulty := uint32(20000)
	challenge := getChallenge("determinism-multi")
	ids := [][]byte{[]byte("x"), []byte("y")}

	b1 := vdf.WesolowskiSolveMulti(challenge, difficulty, ids, 0)
	b2 := vdf.WesolowskiSolveMulti(challenge, difficulty, ids, 0)
	if !bytes.Equal(b1[:], b2[:]) {
		t.Fatalf("Expected deterministic blob for same inputs")
	}
}

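// A minimal benchmark sketch for the verify path, replacing the ad hoc
// time.Since prints above with the testing framework's own timing. It
// assumes only the vdf.WesolowskiSolve/WesolowskiVerify API already used
// in this file; the difficulty and seed are illustrative, not canonical.
func BenchmarkWesolowskiVerify(b *testing.B) {
	difficulty := uint32(160000)
	challenge := getChallenge("BenchmarkWesolowskiVerify")
	solution := vdf.WesolowskiSolve(challenge, difficulty)

	// Exclude the one-time solve from the measured loop.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if !vdf.WesolowskiVerify(challenge, difficulty, solution) {
			b.Fatal("verification failed")
		}
	}
}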