Cassandra Heart 2025-10-25 02:55:12 -05:00 committed by GitHub
parent 0053dcb5e0
commit 19ca2cc553
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 1796 additions and 325 deletions

View File

@ -446,7 +446,7 @@ type setter struct {
ver string
}
func (s setter) String() string { return *s.dst }
func (s setter) String() string { return "" }
func (s setter) Set(_ string) error {
*s.dst = s.value
*s.dstver = s.ver
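A hedged sketch of one plausible motivation for returning "" here: the standard flag package calls String on a zero Value when printing defaults, and dereferencing s.dst on that zero value is a nil-pointer panic. The wiring below (field order, the -feature flag, the main function) is assumed for illustration only and is not taken from this commit.
package main

import (
	"flag"
	"fmt"
)

// Assumed stand-in mirroring the fields shown in the diff above.
type setter struct {
	dst    *string
	dstver *string
	value  string
	ver    string
}

// Returning "" keeps String safe on a zero-value setter, where dst is nil.
func (s setter) String() string { return "" }

func (s setter) Set(_ string) error {
	*s.dst = s.value
	*s.dstver = s.ver
	return nil
}

func main() {
	var out, ver string
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(setter{&out, &ver, "enabled", "1.0"}, "feature", "enable the feature")
	fs.PrintDefaults() // with `return *s.dst`, this path can reach a nil *string
	_ = fs.Parse([]string{"-feature", "x"})
	fmt.Println(out, ver) // enabled 1.0
}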

View File

@ -13,7 +13,7 @@ func GetMinimumVersionCutoff() time.Time {
// if there is something in the patch update that is needed to cut off
// unupgraded peers. Be sure to update this to 0x00 for any new minor release.
func GetMinimumPatchVersion() byte {
return 0x02
return 0x04
}
func GetMinimumVersion() []byte {
@ -43,7 +43,7 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x03
return 0x04
}
func GetRCNumber() byte {
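As a hedged illustration of how these helpers can gate unupgraded peers: the sketch below compares a peer's advertised version bytes and patch byte against the minimums. The comparison logic and the stand-in minimum version bytes are assumptions, not the node's actual check.
package main

import (
	"bytes"
	"fmt"
)

// Stand-ins for GetMinimumVersion() and GetMinimumPatchVersion(); the real
// values come from the functions above.
var minVersion = []byte{0x02, 0x01, 0x00}

const minPatch byte = 0x04

func meetsMinimum(peerVersion []byte, peerPatch byte) bool {
	if c := bytes.Compare(peerVersion, minVersion); c != 0 {
		return c > 0
	}
	return peerPatch >= minPatch
}

func main() {
	fmt.Println(meetsMinimum([]byte{0x02, 0x01, 0x00}, 0x03)) // false: patch below the new 0x04 cutoff
	fmt.Println(meetsMinimum([]byte{0x02, 0x01, 0x00}, 0x04)) // true
}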

View File

@ -605,96 +605,32 @@ func (sm *StateMachine[
return
}
peers, err := sm.leaderProvider.GetNextLeaders(data, ctx)
proposal, err := sm.leaderProvider.ProveNextState(
data,
*collected,
ctx,
)
if err != nil {
sm.traceLogger.Error("could not obtain leaders", err)
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
sm.SendEvent(EventInduceSync)
return
}
proposalCh := make(chan *StateT)
go func() {
proposal, err := sm.leaderProvider.ProveNextState(
data,
*collected,
ctx,
)
if err != nil {
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
proposalCh <- nil
return
}
proposalCh <- proposal
}()
timer := time.NewTicker(1 * time.Second)
checks := 0
for {
select {
case proposal, ok := <-proposalCh:
if !ok || proposal == nil {
sm.SendEvent(EventInduceSync)
return
}
sm.mu.Lock()
sm.traceLogger.Trace(
fmt.Sprintf("adding proposal with rank %d", (*proposal).Rank()),
)
if _, ok := sm.proposals[(*proposal).Rank()]; !ok {
sm.proposals[(*proposal).Rank()] = make(map[Identity]*StateT)
}
sm.proposals[(*proposal).Rank()][sm.id.Identity()] = proposal
sm.mu.Unlock()
sm.SendEvent(EventProofComplete)
return
case <-timer.C:
checks++
sm.mu.Lock()
proposals, ok := sm.proposals[(*data).Rank()+1]
if !ok {
sm.mu.Unlock()
continue
}
// We have the winner, move on
if _, ok := proposals[peers[0].Identity()]; ok {
sm.mu.Unlock()
sm.SendEvent(EventPublishTimeout)
return
}
// Reverse decay acceptance on target time
for i := range peers {
if i == 0 {
// already checked
continue
}
checkTime := i + 10
if checkTime <= checks {
if _, ok := proposals[peers[i].Identity()]; ok {
sm.mu.Unlock()
sm.SendEvent(EventPublishTimeout)
return
}
}
}
sm.mu.Unlock()
case <-ctx.Done():
sm.traceLogger.Trace("context canceled")
return
}
sm.mu.Lock()
sm.traceLogger.Trace(
fmt.Sprintf("adding proposal with rank %d", (*proposal).Rank()),
)
if _, ok := sm.proposals[(*proposal).Rank()]; !ok {
sm.proposals[(*proposal).Rank()] = make(map[Identity]*StateT)
}
sm.proposals[(*proposal).Rank()][sm.id.Identity()] = proposal
sm.mu.Unlock()
sm.SendEvent(EventProofComplete)
},
Timeout: 120 * time.Second,
OnTimeout: EventPublishTimeout,
@ -751,6 +687,18 @@ func (sm *StateMachine[
}
}
if len(sm.proposals[(*sm.activeState).Rank()+1]) < int(sm.minimumProvers()) {
sm.traceLogger.Trace(
fmt.Sprintf(
"insufficient proposal count: %d, need %d",
len(sm.proposals[(*sm.activeState).Rank()+1]),
int(sm.minimumProvers()),
),
)
sm.mu.Unlock()
return
}
if ctx == nil {
sm.traceLogger.Trace("context null")
sm.mu.Unlock()
@ -770,16 +718,6 @@ func (sm *StateMachine[
proposals[k] = &state
}
if len(proposals) == 0 {
sm.mu.Unlock()
sm.traceLogger.Error(
"no proposals to vote on",
errors.New("no proposals"),
)
sm.SendEvent(EventInduceSync)
break
}
sm.mu.Unlock()
selectedPeer, vote, err := sm.votingProvider.DecideAndSendVote(
proposals,
@ -809,64 +747,39 @@ func (sm *StateMachine[
}
} else {
sm.traceLogger.Trace("proposal chosen, checking for quorum")
for {
proposalVotes := map[Identity]*VoteT{}
for p, vp := range sm.votes[(*sm.activeState).Rank()+1] {
vclone := (*vp).Clone().(VoteT)
proposalVotes[p] = &vclone
}
sm.mu.Unlock()
isQuorum, err := sm.votingProvider.IsQuorum(proposalVotes, ctx)
if err != nil {
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
sm.SendEvent(EventInduceSync)
return
}
proposalVotes := map[Identity]*VoteT{}
for p, vp := range sm.votes[(*sm.activeState).Rank()+1] {
vclone := (*vp).Clone().(VoteT)
proposalVotes[p] = &vclone
}
haveEnoughProposals := len(sm.proposals[(*sm.activeState).Rank()+1]) >=
int(sm.minimumProvers())
sm.mu.Unlock()
isQuorum, err := sm.votingProvider.IsQuorum(proposalVotes, ctx)
if err != nil {
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
sm.SendEvent(EventInduceSync)
return
}
if isQuorum {
sm.traceLogger.Trace("quorum reached")
sm.SendEvent(EventQuorumReached)
return
} else {
select {
case <-time.After(1 * time.Second):
vote, ok := proposalVotes[sm.id.Identity()]
if !ok {
sm.traceLogger.Error(
"no vote found",
errors.New("prover has no vote"),
)
sm.SendEvent(EventInduceSync)
return
}
_, err := sm.votingProvider.SendVote(vote, ctx)
if err != nil {
sm.traceLogger.Error(
fmt.Sprintf("error encountered in %s", sm.machineState),
err,
)
sm.SendEvent(EventInduceSync)
return
}
case <-ctx.Done():
return
}
sm.traceLogger.Trace(
fmt.Sprintf(
"quorum not reached: votes: %d, needed: %d",
len(proposalVotes),
sm.minimumProvers(),
),
)
}
sm.mu.Lock()
if isQuorum && haveEnoughProposals {
sm.traceLogger.Trace("quorum reached")
sm.SendEvent(EventQuorumReached)
} else {
sm.traceLogger.Trace(
fmt.Sprintf(
"quorum not reached: proposals: %d, needed: %d",
len(sm.proposals[(*sm.activeState).Rank()+1]),
sm.minimumProvers(),
),
)
}
}
},
Timeout: 16 * time.Second,
Timeout: 1 * time.Second,
OnTimeout: EventVotingTimeout,
}
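A simplified, self-contained sketch of the control-flow change above (not the actual generic StateMachine API): the handler now performs a single quorum check per entry, assuming, as the shorter 1-second Timeout suggests, that EventVotingTimeout leads back into another voting pass instead of the old in-handler polling loop. All names and values below are illustrative.
package main

import (
	"fmt"
	"time"
)

// toyMachine is an illustrative stand-in, not the real state machine.
type toyMachine struct {
	votes, proposals, minimum int
}

// onVoting checks quorum exactly once; there is no internal retry loop.
func (m *toyMachine) onVoting(sendEvent func(string)) {
	isQuorum := m.votes >= m.minimum
	haveEnoughProposals := m.proposals >= m.minimum
	if isQuorum && haveEnoughProposals {
		sendEvent("EventQuorumReached")
		return
	}
	fmt.Printf("quorum not reached: votes %d, proposals %d, need %d\n",
		m.votes, m.proposals, m.minimum)
	// Fall through: the state's short timeout fires the next voting pass.
}

func main() {
	m := &toyMachine{votes: 1, proposals: 1, minimum: 3}
	for i := 0; i < 3; i++ { // stand-in for repeated timeout-driven entries
		m.onVoting(func(ev string) { fmt.Println("event:", ev) })
		m.votes, m.proposals = m.votes+1, m.proposals+1
		time.Sleep(10 * time.Millisecond)
	}
}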

View File

@ -4,7 +4,6 @@ import (
"bytes"
"context"
_ "embed"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
@ -29,7 +28,6 @@ import (
"source.quilibrium.com/quilibrium/monorepo/config"
"source.quilibrium.com/quilibrium/monorepo/consensus"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
qhypergraph "source.quilibrium.com/quilibrium/monorepo/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/provers"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/reward"
consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
@ -42,7 +40,6 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/p2p/onion"
qstore "source.quilibrium.com/quilibrium/monorepo/node/store"
mgr "source.quilibrium.com/quilibrium/monorepo/node/worker"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/channel"
@ -499,170 +496,7 @@ func (e *GlobalConsensusEngine) Start(quit chan struct{}) <-chan error {
var initialState **protobufs.GlobalFrame = nil
if frame != nil {
// HACK: fix-up incorrect prover info
if e.config.P2P.Network == 0 && frame.Header.FrameNumber < 244205 {
e.logger.Debug("fixing prover info")
set := e.hypergraph.(*qhypergraph.HypergraphCRDT).GetVertexAddsSet(
tries.ShardKey{
L1: [3]byte{0x00, 0x00, 0x00},
L2: intrinsics.GLOBAL_INTRINSIC_ADDRESS,
},
)
hset := e.hypergraph.(*qhypergraph.HypergraphCRDT).GetHyperedgeAddsSet(
tries.ShardKey{
L1: [3]byte{0x00, 0x00, 0x00},
L2: intrinsics.GLOBAL_INTRINSIC_ADDRESS,
},
)
txn, err := e.hypergraph.NewTransaction(false)
if err != nil {
panic(err)
}
genesisData := e.getMainnetGenesisJSON()
e.proverRegistry.Refresh()
e.logger.Debug("loaded genesis info and prover registry")
globalProvers, _ := e.proverRegistry.GetActiveProvers(nil)
sen := uint64(0)
toAdd := [][]byte{}
archivePeers := [][]byte{}
bpub, err := base64.StdEncoding.DecodeString(
genesisData.BeaconBLS48581Key,
)
archivePeers = append(archivePeers, bpub)
for _, pubkeyhex := range genesisData.ArchivePeers {
pubkey, err := hex.DecodeString(pubkeyhex)
if err != nil {
panic(err)
}
archivePeers = append(archivePeers, pubkey)
}
for _, pubkey := range archivePeers {
found := false
for _, p := range globalProvers {
if sen == 0 {
sen = p.Seniority
}
if bytes.Equal(p.PublicKey, pubkey) {
found = true
break
}
}
if !found {
e.logger.Debug(
"adding prover",
zap.String("pubkey", hex.EncodeToString(pubkey)),
)
toAdd = append(toAdd, pubkey)
}
}
toRemove := []*typesconsensus.ProverInfo{}
for _, p := range globalProvers {
found := false
for _, pubkey := range archivePeers {
if bytes.Equal(p.PublicKey, pubkey) {
found = true
break
}
}
if !found {
e.logger.Debug(
"removing prover",
zap.String("pubkey", hex.EncodeToString(p.Address)),
)
toRemove = append(toRemove, p)
}
}
for _, p := range toRemove {
proverAddress := slices.Concat(
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
p.Address[:],
)
err = set.GetTree().Delete(txn, proverAddress)
if err != nil {
txn.Abort()
panic(err)
}
allocationAddressBI, err := poseidon.HashBytes(
slices.Concat([]byte("PROVER_ALLOCATION"), p.PublicKey, nil),
)
if err != nil {
panic(err)
}
allocationAddress := slices.Concat(
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
allocationAddressBI.FillBytes(make([]byte, 32)),
)
err = set.GetTree().Delete(txn, allocationAddress)
if err != nil {
txn.Abort()
panic(err)
}
err = txn.Delete(slices.Concat(
[]byte{qstore.HYPERGRAPH_SHARD, qstore.VERTEX_DATA},
proverAddress,
))
if err != nil {
txn.Abort()
panic(err)
}
err = txn.Delete(slices.Concat(
[]byte{qstore.HYPERGRAPH_SHARD, qstore.VERTEX_DATA},
allocationAddress,
))
if err != nil {
txn.Abort()
panic(err)
}
err = hset.GetTree().Delete(txn, proverAddress)
if err != nil {
txn.Abort()
panic(err)
}
}
e.logger.Debug("commiting state")
if err = txn.Commit(); err != nil {
panic(err)
}
state := hgstate.NewHypergraphState(e.hypergraph)
for _, p := range toAdd {
err = e.addGenesisProver(
schema.NewRDFMultiprover(
&schema.TurtleRDFParser{},
e.inclusionProver,
),
state,
p,
sen,
0,
)
if err != nil {
panic(err)
}
}
if err = state.Commit(); err != nil {
panic(err)
}
e.logger.Debug("refreshing registry")
if err = e.proverRegistry.Refresh(); err != nil {
panic(err)
}
} else {
initialState = &frame
}
initialState = &frame
}
if e.config.P2P.Network == 99 || e.config.Engine.ArchiveMode {

View File

@ -555,7 +555,7 @@ func createIntegrationTestGlobalConsensusEngineWithHypergraphAndKey(
&bulletproofs.Decaf448KeyConstructor{}, // decafConstructor
compiler.NewBedlamCompiler(),
nil,
nil,
qp2p.NewInMemoryPeerInfoManager(logger),
)
require.NoError(t, err)
@ -1101,7 +1101,7 @@ func TestGlobalConsensusEngine_Integration_NoProversStaysInVerifying(t *testing.
&bulletproofs.Decaf448KeyConstructor{}, // decafConstructor
compiler.NewBedlamCompiler(),
nil, // blsConstructor
nil,
qp2p.NewInMemoryPeerInfoManager(logger),
)
require.NoError(t, err)

node/dbscan/main.go (new file, 1454 lines)

File diff suppressed because it is too large

View File

@ -14,6 +14,7 @@ const (
HYPERGRAPH_SHARD = 0x09
SHARD = 0x0A
INBOX = 0x0B
MIGRATION = 0xF0
WORKER = 0xFF
)
@ -59,10 +60,7 @@ const (
// Hypergraph store indexes:
const (
VERTEX_ADDS = 0x00
VERTEX_REMOVES = 0x10
VERTEX_DATA = 0xF0
VERTEX_TOMBSTONE = 0xF1
SHARD_COMMIT = 0x00
HYPEREDGE_ADDS = 0x01
HYPEREDGE_REMOVES = 0x11
VERTEX_ADDS_TREE_NODE = 0x02
@ -81,6 +79,8 @@ const (
HYPERGRAPH_VERTEX_REMOVES_SHARD_COMMIT = 0xE1
HYPERGRAPH_HYPEREDGE_ADDS_SHARD_COMMIT = 0xE2
HYPERGRAPH_HYPEREDGE_REMOVES_SHARD_COMMIT = 0xE3
VERTEX_DATA = 0xF0
VERTEX_TOMBSTONE = 0xF1
HYPERGRAPH_COVERED_PREFIX = 0xFA
HYPERGRAPH_COMPLETE = 0xFB
VERTEX_ADDS_TREE_ROOT = 0xFC

View File

@ -303,6 +303,8 @@ func hypergraphVertexAddsShardCommitKey(
shardAddress []byte,
) []byte {
key := []byte{HYPERGRAPH_SHARD}
// The first byte is technically reserved (SHARD_COMMIT) but in practice
// won't be non-zero
key = binary.BigEndian.AppendUint64(key, frameNumber)
key = append(key, HYPERGRAPH_VERTEX_ADDS_SHARD_COMMIT)
key = append(key, shardAddress...)
@ -314,6 +316,8 @@ func hypergraphVertexRemovesShardCommitKey(
shardAddress []byte,
) []byte {
key := []byte{HYPERGRAPH_SHARD}
// The first byte is technically reserved (SHARD_COMMIT) but in practice
// won't be non-zero
key = binary.BigEndian.AppendUint64(key, frameNumber)
key = append(key, HYPERGRAPH_VERTEX_REMOVES_SHARD_COMMIT)
key = append(key, shardAddress...)
@ -325,6 +329,8 @@ func hypergraphHyperedgeAddsShardCommitKey(
shardAddress []byte,
) []byte {
key := []byte{HYPERGRAPH_SHARD}
// The first byte is technically reserved (SHARD_COMMIT) but in practice
// won't be non-zero
key = binary.BigEndian.AppendUint64(key, frameNumber)
key = append(key, HYPERGRAPH_HYPEREDGE_ADDS_SHARD_COMMIT)
key = append(key, shardAddress...)
@ -336,6 +342,8 @@ func hypergraphHyperedgeRemovesShardCommitKey(
shardAddress []byte,
) []byte {
key := []byte{HYPERGRAPH_SHARD}
// The first byte is technically reserved (SHARD_COMMIT) but in practice
// won't be non-zero
key = binary.BigEndian.AppendUint64(key, frameNumber)
key = append(key, HYPERGRAPH_HYPEREDGE_REMOVES_SHARD_COMMIT)
key = append(key, shardAddress...)
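A runnable sketch of the key layout these comments describe, using the constant values from the store indexes above (HYPERGRAPH_SHARD = 0x09, HYPERGRAPH_VERTEX_ADDS_SHARD_COMMIT = 0xE0) and a made-up, truncated shard address for readability.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const hypergraphShard = 0x09             // HYPERGRAPH_SHARD
	const vertexAddsShardCommit = byte(0xE0) // HYPERGRAPH_VERTEX_ADDS_SHARD_COMMIT
	frameNumber := uint64(244205)
	shardAddress := []byte{0xAA, 0xBB, 0xCC} // made-up, truncated for readability

	key := []byte{hypergraphShard}
	// The leading byte of the big-endian frame number sits where SHARD_COMMIT
	// (0x00) would otherwise go; for realistic frame numbers it stays zero,
	// which is why the overlap is harmless in practice.
	key = binary.BigEndian.AppendUint64(key, frameNumber)
	key = append(key, vertexAddsShardCommit)
	key = append(key, shardAddress...)

	fmt.Printf("% x\n", key) // 09 00 00 00 00 00 03 b9 ed e0 aa bb cc
}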

View File

@ -1,6 +1,8 @@
package store
import (
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"os"
@ -17,6 +19,12 @@ type PebbleDB struct {
db *pebble.DB
}
// pebbleMigrations contains the ordered migration steps; the current schema
// version is len(pebbleMigrations). New migrations are appended to the end.
var pebbleMigrations = []func(*pebble.Batch) error{
migration_2_1_0_4,
}
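As a hedged illustration of that convention, a hypothetical future step would be added as sketched below; migration_2_1_0_5 and the key it deletes are invented for the example, and only the append-at-the-end pattern is the point.
// Hypothetical future migration; the name and key are assumptions, not part
// of this commit.
func migration_2_1_0_5(b *pebble.Batch) error {
	// Assumed leftover key under the SHARD prefix.
	if err := b.Delete([]byte{SHARD, 0x00}, &pebble.WriteOptions{}); err != nil {
		return errors.Wrap(err, "stale key removal")
	}
	return nil
}

// Registered by appending, so len(pebbleMigrations) becomes the new stored
// schema version:
//
//	var pebbleMigrations = []func(*pebble.Batch) error{
//		migration_2_1_0_4,
//		migration_2_1_0_5,
//	}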
func NewPebbleDB(
logger *zap.Logger,
config *config.DBConfig,
@ -84,7 +92,112 @@ func NewPebbleDB(
os.Exit(1)
}
return &PebbleDB{db}
pebbleDB := &PebbleDB{db}
if err := pebbleDB.migrate(logger); err != nil {
logger.Error(
fmt.Sprintf("failed to migrate %s", storeType),
zap.Error(err),
zap.String("path", path),
zap.Uint("core_id", coreId),
)
pebbleDB.Close()
os.Exit(1)
}
return pebbleDB
}
func (p *PebbleDB) migrate(logger *zap.Logger) error {
currentVersion := uint64(len(pebbleMigrations))
var storedVersion uint64
var foundVersion bool
value, closer, err := p.db.Get([]byte{MIGRATION})
switch {
case err == pebble.ErrNotFound:
// missing version implies zero
case err != nil:
return errors.Wrap(err, "load migration version")
default:
foundVersion = true
if len(value) != 8 {
if closer != nil {
_ = closer.Close()
}
return errors.Errorf(
"invalid migration version length: %d",
len(value),
)
}
storedVersion = binary.BigEndian.Uint64(value)
if closer != nil {
if err := closer.Close(); err != nil {
logger.Warn("failed to close migration version reader", zap.Error(err))
}
}
}
if storedVersion > currentVersion {
return errors.Errorf(
"store migration version %d ahead of binary %d running a migrated db "+
"with an earlier version can cause irreparable corruption, shutting down",
storedVersion,
currentVersion,
)
}
needsUpdate := !foundVersion || storedVersion < currentVersion
if !needsUpdate {
logger.Info("no pebble store migrations required")
return nil
}
batch := p.db.NewBatch()
for i := int(storedVersion); i < len(pebbleMigrations); i++ {
logger.Warn(
"performing pebble store migration",
zap.Int("from_version", i),
zap.Int("to_version", i+1),
)
if err := pebbleMigrations[i](batch); err != nil {
batch.Close()
logger.Error("migration failed", zap.Error(err))
return errors.Wrapf(err, "apply migration %d", i+1)
}
logger.Info(
"migration step completed",
zap.Int("from_version", i),
zap.Int("to_version", i+1),
)
}
var versionBuf [8]byte
binary.BigEndian.PutUint64(versionBuf[:], currentVersion)
if err := batch.Set([]byte{MIGRATION}, versionBuf[:], nil); err != nil {
batch.Close()
return errors.Wrap(err, "set migration version")
}
if err := batch.Commit(&pebble.WriteOptions{Sync: true}); err != nil {
batch.Close()
return errors.Wrap(err, "commit migration batch")
}
if currentVersion != storedVersion {
logger.Info(
"applied pebble store migrations",
zap.Uint64("from_version", storedVersion),
zap.Uint64("to_version", currentVersion),
)
} else {
logger.Info(
"initialized pebble store migration version",
zap.Uint64("version", currentVersion),
)
}
return nil
}
func (p *PebbleDB) Get(key []byte) ([]byte, io.Closer, error) {
@ -221,3 +334,106 @@ func rightAlign(data []byte, size int) []byte {
copy(pad[size-l:], data)
return pad
}
// Resolves all the variations of store issues that can result from any
// sequence of upgrade steps across 2.1.0.1->2.1.0.3
func migration_2_1_0_4(b *pebble.Batch) error {
// batches don't use write options, but the parameter is required for
// backwards compatibility
wo := &pebble.WriteOptions{}
frame_start, _ := hex.DecodeString("0000000000000003b9e8")
frame_end, _ := hex.DecodeString("0000000000000003b9ec")
err := b.DeleteRange(frame_start, frame_end, wo)
if err != nil {
return errors.Wrap(err, "frame removal")
}
frame_first_index, _ := hex.DecodeString("0010")
frame_last_index, _ := hex.DecodeString("0020")
err = b.Delete(frame_first_index, wo)
if err != nil {
return errors.Wrap(err, "frame first index removal")
}
err = b.Delete(frame_last_index, wo)
if err != nil {
return errors.Wrap(err, "frame last index removal")
}
shard_commits_hex := []string{
"090000000000000000e0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"090000000000000000e1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"090000000000000000e2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"090000000000000000e3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
}
for _, shard_commit_hex := range shard_commits_hex {
shard_commit, _ := hex.DecodeString(shard_commit_hex)
err = b.Delete(shard_commit, wo)
if err != nil {
return errors.Wrap(err, "shard commit removal")
}
}
vertex_adds_tree_start, _ := hex.DecodeString("0902000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
vertex_adds_tree_end, _ := hex.DecodeString("0902000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.DeleteRange(vertex_adds_tree_start, vertex_adds_tree_end, wo)
if err != nil {
return errors.Wrap(err, "vertex adds tree removal")
}
hyperedge_adds_tree_start, _ := hex.DecodeString("0903000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
hyperedge_adds_tree_end, _ := hex.DecodeString("0903000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.DeleteRange(hyperedge_adds_tree_start, hyperedge_adds_tree_end, wo)
if err != nil {
return errors.Wrap(err, "hyperedge adds tree removal")
}
vertex_adds_by_path_start, _ := hex.DecodeString("0922000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
vertex_adds_by_path_end, _ := hex.DecodeString("0922000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.DeleteRange(vertex_adds_by_path_start, vertex_adds_by_path_end, wo)
if err != nil {
return errors.Wrap(err, "vertex adds by path removal")
}
hyperedge_adds_by_path_start, _ := hex.DecodeString("0923000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
hyperedge_adds_by_path_end, _ := hex.DecodeString("0923000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.DeleteRange(hyperedge_adds_by_path_start, hyperedge_adds_by_path_end, wo)
if err != nil {
return errors.Wrap(err, "hyperedge adds by path removal")
}
vertex_adds_change_record_start, _ := hex.DecodeString("0942000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
vertex_adds_change_record_end, _ := hex.DecodeString("0942000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
hyperedge_adds_change_record_start, _ := hex.DecodeString("0943000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
hyperedge_adds_change_record_end, _ := hex.DecodeString("0943000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.DeleteRange(vertex_adds_change_record_start, vertex_adds_change_record_end, wo)
if err != nil {
return errors.Wrap(err, "vertex adds change record removal")
}
err = b.DeleteRange(hyperedge_adds_change_record_start, hyperedge_adds_change_record_end, wo)
if err != nil {
return errors.Wrap(err, "hyperedge adds change record removal")
}
vertex_data_start, _ := hex.DecodeString("09f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
vertex_data_end, _ := hex.DecodeString("09f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.DeleteRange(vertex_data_start, vertex_data_end, wo)
if err != nil {
return errors.Wrap(err, "vertex data removal")
}
vertex_add_root, _ := hex.DecodeString("09fc000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
hyperedge_add_root, _ := hex.DecodeString("09fe000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
err = b.Delete(vertex_add_root, wo)
if err != nil {
return errors.Wrap(err, "vertex add root removal")
}
err = b.Delete(hyperedge_add_root, wo)
if err != nil {
return errors.Wrap(err, "hyperedge add root removal")
}
return nil
}

View File

@ -0,0 +1,46 @@
package mocks
import (
"github.com/stretchr/testify/mock"
"source.quilibrium.com/quilibrium/monorepo/protobufs"
"source.quilibrium.com/quilibrium/monorepo/types/p2p"
)
var _ p2p.PeerInfoManager = (*MockPeerInfoManager)(nil)
type MockPeerInfoManager struct {
mock.Mock
}
// AddPeerInfo implements p2p.PeerInfoManager.
func (m *MockPeerInfoManager) AddPeerInfo(info *protobufs.PeerInfo) {
m.Called(info)
}
// GetPeerInfo implements p2p.PeerInfoManager.
func (m *MockPeerInfoManager) GetPeerInfo(peerId []byte) *p2p.PeerInfo {
args := m.Called(peerId)
return args.Get(0).(*p2p.PeerInfo)
}
// GetPeerMap implements p2p.PeerInfoManager.
func (m *MockPeerInfoManager) GetPeerMap() map[string]*p2p.PeerInfo {
args := m.Called()
return args.Get(0).(map[string]*p2p.PeerInfo)
}
// GetPeersBySpeed implements p2p.PeerInfoManager.
func (m *MockPeerInfoManager) GetPeersBySpeed() [][]byte {
args := m.Called()
return args.Get(0).([][]byte)
}
// Start implements p2p.PeerInfoManager.
func (m *MockPeerInfoManager) Start() {
m.Called()
}
// Stop implements p2p.PeerInfoManager.
func (m *MockPeerInfoManager) Stop() {
m.Called()
}
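A short sketch of how a test might drive this mock; the return values are illustrative, while the On/Return/AssertExpectations calls are standard testify mock usage.
package mocks

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestMockPeerInfoManager_Sketch(t *testing.T) {
	m := &MockPeerInfoManager{}
	m.On("GetPeersBySpeed").Return([][]byte{[]byte("peer-a")})
	m.On("Stop").Return()

	require.Len(t, m.GetPeersBySpeed(), 1)
	m.Stop()

	m.AssertExpectations(t)
}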