Merge branch 'develop-2.1-milestone5-consensus' into feat/2.1-qclient-factor-and-node-install

Authored by Cassandra Heart on 2025-04-11 19:42:53 -07:00; committed by GitHub
commit eb51f6093e
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
11 changed files with 344 additions and 87 deletions

View File

@@ -42,7 +42,7 @@ RUN --mount=type=cache,target=/usr/local/,id=usr-local-${TARGETOS}-${TARGETARCH}
RUN --mount=type=cache,target=/usr/local/,id=usr-local-${TARGETOS}-${TARGETARCH} \
cd emp-ot && mkdir build && cd build && cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local && cd .. && make && make install && cd ..
ARG GO_VERSION=1.22.5
ARG GO_VERSION=1.23.5
RUN --mount=type=cache,target=/usr/local,id=usr-local-${TARGETOS}-${TARGETARCH} \
apt update && apt install -y wget && \
ARCH=$(dpkg --print-architecture) && \

View File

@@ -29,10 +29,10 @@ RUN apt update && apt install -y wget && \
arm64) GOARCH=arm64 ;; \
*) echo "Unsupported architecture: ${ARCH}" && exit 1 ;; \
esac && \
wget https://go.dev/dl/go1.22.0.linux-${GOARCH}.tar.gz && \
wget https://go.dev/dl/go1.23.5.linux-${GOARCH}.tar.gz && \
rm -rf /usr/local/go && \
tar -C /usr/local -xzf go1.22.0.linux-${GOARCH}.tar.gz && \
rm go1.22.0.linux-${GOARCH}.tar.gz
tar -C /usr/local -xzf go1.23.5.linux-${GOARCH}.tar.gz && \
rm go1.23.5.linux-${GOARCH}.tar.gz
ENV PATH=$PATH:/usr/local/go/bin

View File

@@ -175,53 +175,54 @@ func NewDataClockConsensusEngine(
if logger == nil {
panic(errors.New("logger is nil"))
}
slogger := logger.With(zap.String("stage", "data-clock-consensus"))
if cfg == nil {
panic(errors.New("engine config is nil"))
slogger.Panic("engine config is nil")
}
if keyManager == nil {
panic(errors.New("key manager is nil"))
slogger.Panic("key manager is nil")
}
if clockStore == nil {
panic(errors.New("clock store is nil"))
slogger.Panic("clock store is nil")
}
if coinStore == nil {
panic(errors.New("coin store is nil"))
slogger.Panic("coin store is nil")
}
if dataProofStore == nil {
panic(errors.New("data proof store is nil"))
slogger.Panic("data proof store is nil")
}
if keyStore == nil {
panic(errors.New("key store is nil"))
slogger.Panic("key store is nil")
}
if pubSub == nil {
panic(errors.New("pubsub is nil"))
slogger.Panic("pubsub is nil")
}
if frameProver == nil {
panic(errors.New("frame prover is nil"))
slogger.Panic("frame prover is nil")
}
if inclusionProver == nil {
panic(errors.New("inclusion prover is nil"))
slogger.Panic("frame inclusion prover is nil")
}
if masterTimeReel == nil {
panic(errors.New("master time reel is nil"))
slogger.Panic("master time reel is nil")
}
if dataTimeReel == nil {
panic(errors.New("data time reel is nil"))
slogger.Panic("data time reel is nil")
}
if peerInfoManager == nil {
panic(errors.New("peer info manager is nil"))
slogger.Panic("peer info manager is nil")
}
difficulty := cfg.Engine.Difficulty
@@ -234,12 +235,12 @@ func NewDataClockConsensusEngine(
16,
)
if err != nil {
panic(err)
slogger.Panic("error creating clock frame fragment buffer", zap.Error(err))
}
cache, err := lru.New[string, struct{}](25)
if err != nil {
panic(err)
slogger.Panic("error creating lru cache", zap.Error(err))
}
ctx, cancel := context.WithCancel(context.Background())
@@ -247,7 +248,7 @@
ctx: ctx,
cancel: cancel,
difficulty: difficulty,
logger: logger.With(zap.String("stage", "data-clock-consensus")),
logger: slogger,
state: consensus.EngineStateStopped,
clockStore: clockStore,
coinStore: coinStore,
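
The hunks above replace bare panic(errors.New(...)) calls with a stage-scoped zap sub-logger whose Panic both logs and aborts. A minimal sketch of the pattern, with simplified names rather than the actual engine constructor:

package main

import "go.uber.org/zap"

type engine struct {
	logger *zap.Logger
}

func newEngine(logger *zap.Logger, cfg any) *engine {
	if logger == nil {
		panic("logger is nil") // the logger cannot report its own absence
	}
	// Derive the stage-scoped logger once and reuse it for every check
	// below and for the constructed engine itself.
	slogger := logger.With(zap.String("stage", "data-clock-consensus"))
	if cfg == nil {
		// zap's Panic logs the message at PanicLevel and then calls panic(),
		// so the old error-plus-panic pairs collapse into one call.
		slogger.Panic("engine config is nil")
	}
	return &engine{logger: slogger}
}

func main() {
	logger, _ := zap.NewDevelopment()
	_ = newEngine(logger, struct{}{})
}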

View File

@@ -0,0 +1,100 @@
package master
import (
"crypto/rand"
"strings"
"testing"
"github.com/cloudflare/circl/sign/ed448"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/assert"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/config"
qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
func TestHandleMessage(t *testing.T) {
t.Run("test handle message", func(t *testing.T) {
logger := utils.GetDebugLogger()
engine := &MasterClockConsensusEngine{
logger: logger,
}
anyPb := &anypb.Any{}
anyBytes, err := proto.Marshal(anyPb)
assert.NoError(t, err)
msg := &protobufs.Message{
Payload: anyBytes,
}
msgBytes, err := proto.Marshal(msg)
assert.NoError(t, err)
message := &pb.Message{
Data: msgBytes,
From: []byte("test from"),
Signature: []byte("test signature"),
}
if err := engine.handleMessage(message); err != nil {
assert.Equal(t, err.Error(), "handle message: invalid message")
}
})
}
func TestHandleClockFrameData(t *testing.T) {
t.Run("test handle clock frame data", func(t *testing.T) {
logger := utils.GetDebugLogger()
config.DownloadAndVerifyGenesis(1)
filter := "0000000000000000000000000000000000000000000000000000000000000000"
difficulty := uint32(160000)
engineConfig := &config.EngineConfig{
ProvingKeyId: "default-proving-key",
Filter: filter,
GenesisSeed: strings.Repeat("00", 516),
Difficulty: difficulty,
}
kvStore := store.NewInMemKVDB()
cs := store.NewPebbleClockStore(kvStore, logger)
km := keys.NewInMemoryKeyManager()
bpub, bprivKey, _ := ed448.GenerateKey(rand.Reader)
ps := &pubsub{
privkey: bprivKey,
pubkey: bpub,
}
dataProver := qcrypto.NewKZGInclusionProver(logger)
frameProver := qcrypto.NewWesolowskiFrameProver(logger)
masterTimeReel := qtime.NewMasterTimeReel(logger, cs, engineConfig, frameProver)
peerInfoManager := p2p.NewInMemoryPeerInfoManager(logger)
report := &protobufs.SelfTestReport{}
mockFrameProver := &mockFrameProver{
verifyMasterClockFrame: func(frame *protobufs.ClockFrame) error {
return nil
},
}
engine := NewMasterClockConsensusEngine(
engineConfig, logger, cs, km, ps, dataProver, mockFrameProver,
masterTimeReel, peerInfoManager, report)
engine.Start()
anyPb := &anypb.Any{}
frameNumber := uint64(1)
frame := &protobufs.ClockFrame{
FrameNumber: frameNumber,
Difficulty: difficulty,
}
err := anyPb.MarshalFrom(frame)
assert.NoError(t, err)
peerID, err := peer.Decode("QmNSGavG2DfJwGpHmzKjVmTD6CVSyJsUFTXsW4JXt2eySR")
assert.NoError(t, err)
err = engine.handleClockFrameData([]byte(peerID), anyPb)
assert.NoError(t, err)
})
}
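
The test wraps a ClockFrame in an anypb.Any before handing it to handleClockFrameData. A self-contained sketch of that pack/unpack round trip, using a well-known wrapper type as a stand-in for the project's ClockFrame message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Pack a concrete message into an Any, as the test does with a frame.
	payload := wrapperspb.String("frame stand-in")
	anyPb := &anypb.Any{}
	if err := anyPb.MarshalFrom(payload); err != nil {
		panic(err)
	}

	// The receiving handler unpacks it back into the expected concrete type.
	out := &wrapperspb.StringValue{}
	if err := anyPb.UnmarshalTo(out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue())
}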

View File

@@ -88,38 +88,40 @@ func NewMasterClockConsensusEngine(
panic(errors.New("logger is nil"))
}
slogger := logger.With(zap.String("stage", "master-clock-consensus"))
if engineConfig == nil {
panic(errors.New("engine config is nil"))
slogger.Panic("engine config is nil")
}
if keyManager == nil {
panic(errors.New("key manager is nil"))
slogger.Panic("key manager is nil")
}
if pubSub == nil {
panic(errors.New("pubsub is nil"))
slogger.Panic("pubsub is nil")
}
if dataProver == nil {
panic(errors.New("data prover is nil"))
slogger.Panic("data prover is nil")
}
if frameProver == nil {
panic(errors.New("frame prover is nil"))
slogger.Panic("frame prover is nil")
}
if masterTimeReel == nil {
panic(errors.New("master time reel is nil"))
slogger.Panic("master time reel is nil")
}
seed, err := hex.DecodeString(engineConfig.GenesisSeed)
if err != nil {
panic(errors.New("genesis seed is nil"))
slogger.Panic("genesis seed is nil", zap.Error(err))
}
e := &MasterClockConsensusEngine{
difficulty: MASTER_CLOCK_RATE,
logger: logger.With(zap.String("stage", "master-clock-consensus")),
logger: slogger,
state: consensus.EngineStateStopped,
keyManager: keyManager,
pubSub: pubSub,
@@ -144,16 +146,16 @@ func NewMasterClockConsensusEngine(
if e.filter, err = hex.DecodeString(
"0000000000000000000000000000000000000000000000000000000000000000",
); err != nil {
panic(errors.Wrap(err, "could not parse filter value"))
slogger.Panic("could not decode filter", zap.Error(err))
}
e.getProvingKey(engineConfig)
if err := e.createCommunicationKeys(); err != nil {
panic(err)
slogger.Panic("could not create communication keys", zap.Error(err))
}
logger.Info("constructing consensus engine")
slogger.Info("constructing consensus engine")
return e
}
@@ -170,19 +172,19 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
err := e.masterTimeReel.Start()
if err != nil {
panic(err)
e.logger.Panic("could not start master time reel", zap.Error(err))
}
beaconPubKey, err := pcrypto.UnmarshalEd448PublicKey(
config.GetGenesis().Beacon,
)
if err != nil {
panic(err)
e.logger.Panic("could not unmarshal beacon public key", zap.Error(err))
}
e.beacon, err = peer.IDFromPublicKey(beaconPubKey)
if err != nil {
panic(err)
e.logger.Panic("could not get beacon peer id", zap.Error(err))
}
go func() {
@@ -191,7 +193,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
case newFrame := <-e.frameValidationCh:
head, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
if head.FrameNumber > newFrame.FrameNumber ||
@@ -237,7 +239,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
for e.state < consensus.EngineStateStopping {
frame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
if frame, err = e.prove(frame); err != nil {
@@ -340,7 +342,7 @@ func (e *MasterClockConsensusEngine) GetDifficulty() uint32 {
func (e *MasterClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
frame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
return frame
@@ -398,22 +400,19 @@ func (e *MasterClockConsensusEngine) getProvingKey(
}
if err != nil {
e.logger.Error("could not get proving key", zap.Error(err))
panic(err)
e.logger.Panic("could not get proving key", zap.Error(err))
}
rawKey, err := e.keyManager.GetRawKey(engineConfig.ProvingKeyId)
if err != nil {
e.logger.Error("could not get proving key type", zap.Error(err))
panic(err)
e.logger.Panic("could not get proving key type", zap.Error(err))
}
provingKeyType := rawKey.Type
h, err := poseidon.HashBytes(rawKey.PublicKey)
if err != nil {
e.logger.Error("could not hash proving key", zap.Error(err))
panic(err)
e.logger.Panic("could not hash proving key", zap.Error(err))
}
provingKeyAddress := h.Bytes()
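
zap's Panic writes the entry, including the zap.Error field, before panicking, which is why the separate Error-then-panic pairs above collapse into single calls; the panic itself remains recoverable. A small illustrative sketch:

package main

import (
	"errors"
	"fmt"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample().With(zap.String("stage", "master-clock-consensus"))

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()

	// Emits {"level":"panic","msg":"could not get proving key","error":...}
	// and then panics with the message.
	logger.Panic("could not get proving key", zap.Error(errors.New("key not found")))
}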

View File

@@ -0,0 +1,154 @@
package master
import (
"context"
gocrypto "crypto"
"crypto/rand"
"strings"
"sync"
"testing"
"time"
"github.com/cloudflare/circl/sign/ed448"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/wrapperspb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/config"
qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type pubsub struct {
privkey ed448.PrivateKey
pubkey []byte
}
func (pubsub) GetBitmaskPeers() map[string][]string { return nil }
func (pubsub) Publish(address []byte, data []byte) error { return nil }
func (pubsub) PublishToBitmask(bitmask []byte, data []byte) error { return nil }
func (pubsub) Subscribe(bitmask []byte, handler func(message *pb.Message) error) error { return nil }
func (pubsub) Unsubscribe(bitmask []byte, raw bool) {}
func (pubsub) RegisterValidator(bitmask []byte, validator func(peerID peer.ID, message *pb.Message) p2p.ValidationResult, sync bool) error {
return nil
}
func (pubsub) UnregisterValidator(bitmask []byte) error { return nil }
func (pubsub) GetPeerID() []byte { return nil }
func (pubsub) GetPeerstoreCount() int { return 0 }
func (pubsub) GetNetworkPeersCount() int { return 0 }
func (pubsub) GetRandomPeer(bitmask []byte) ([]byte, error) { return nil, nil }
func (pubsub) GetMultiaddrOfPeerStream(ctx context.Context, peerId []byte) <-chan multiaddr.Multiaddr {
return nil
}
func (pubsub) GetMultiaddrOfPeer(peerId []byte) string { return "" }
func (pubsub) GetNetwork() uint { return 1 }
func (pubsub) StartDirectChannelListener(
key []byte,
purpose string,
server *grpc.Server,
) error {
return nil
}
func (pubsub) GetDirectChannel(ctx context.Context, peerId []byte, purpose string) (*grpc.ClientConn, error) {
return nil, nil
}
func (pubsub) GetNetworkInfo() *protobufs.NetworkInfoResponse {
return nil
}
func (p pubsub) SignMessage(msg []byte) ([]byte, error) {
return p.privkey.Sign(rand.Reader, msg, gocrypto.Hash(0))
}
func (p pubsub) GetPublicKey() []byte { return p.pubkey }
func (pubsub) GetPeerScore(peerId []byte) int64 { return 0 }
func (pubsub) SetPeerScore(peerId []byte, score int64) {}
func (pubsub) AddPeerScore(peerId []byte, scoreDelta int64) {}
func (pubsub) Reconnect(peerId []byte) error { return nil }
func (pubsub) Bootstrap(context.Context) error { return nil }
func (pubsub) DiscoverPeers(context.Context) error { return nil }
func (pubsub) IsPeerConnected(peerId []byte) bool { return false }
func (pubsub) Reachability() *wrapperspb.BoolValue { return nil }
var _ p2p.PubSub = (*pubsub)(nil)
type mockFrameProver struct {
qcrypto.FrameProver
verifyMasterClockFrame func(frame *protobufs.ClockFrame) error
}
var _ qcrypto.FrameProver = (*mockFrameProver)(nil)
func (m *mockFrameProver) VerifyMasterClockFrame(frame *protobufs.ClockFrame) error {
return m.verifyMasterClockFrame(frame)
}
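
Embedding the qcrypto.FrameProver interface lets the mock override only the method the test exercises; any other method would hit the nil embedded value. A generic sketch of that technique, with illustrative names rather than the project's interfaces:

package main

import "fmt"

type prover interface {
	Verify(frameNumber uint64) error
	Prove(frameNumber uint64) error
}

// mockProver satisfies prover via the embedded interface but overrides only
// Verify; calling an un-overridden method would panic on the nil embed.
type mockProver struct {
	prover
	verify func(frameNumber uint64) error
}

func (m *mockProver) Verify(frameNumber uint64) error { return m.verify(frameNumber) }

func main() {
	m := &mockProver{verify: func(frameNumber uint64) error {
		fmt.Println("verified frame", frameNumber)
		return nil
	}}
	var p prover = m
	_ = p.Verify(1)
}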
func TestStartMasterClockConsensusEngine(t *testing.T) {
t.Run("test validate and storage", func(t *testing.T) {
logger := utils.GetDebugLogger()
config.DownloadAndVerifyGenesis(1)
filter := "0000000000000000000000000000000000000000000000000000000000000000"
engineConfig := &config.EngineConfig{
ProvingKeyId: "default-proving-key",
Filter: filter,
GenesisSeed: strings.Repeat("00", 516),
Difficulty: 10,
}
kvStore := store.NewInMemKVDB()
cs := store.NewPebbleClockStore(kvStore, logger)
km := keys.NewInMemoryKeyManager()
bpub, bprivKey, _ := ed448.GenerateKey(rand.Reader)
ps := &pubsub{
privkey: bprivKey,
pubkey: bpub,
}
dataProver := qcrypto.NewKZGInclusionProver(logger)
frameProver := qcrypto.NewWesolowskiFrameProver(logger)
masterTimeReel := qtime.NewMasterTimeReel(logger, cs, engineConfig, frameProver)
peerInfoManager := p2p.NewInMemoryPeerInfoManager(logger)
testFrameNumber := uint64(1)
report := &protobufs.SelfTestReport{}
var wg sync.WaitGroup
wg.Add(1)
mockFrameProver := &mockFrameProver{
verifyMasterClockFrame: func(frame *protobufs.ClockFrame) error {
assert.Equal(t, testFrameNumber, frame.FrameNumber)
logger.Info("frame verified", zap.Uint64("frame_number", frame.FrameNumber))
defer wg.Done()
return nil
},
}
engine := NewMasterClockConsensusEngine(engineConfig, logger, cs, km, ps, dataProver, mockFrameProver, masterTimeReel, peerInfoManager, report)
engine.Start()
head1, err := masterTimeReel.Head()
assert.NoError(t, err)
assert.Equal(t, uint64(0), head1.FrameNumber)
head1Selector, err := head1.GetSelector()
assert.NoError(t, err)
newFrame := &protobufs.ClockFrame{
FrameNumber: testFrameNumber,
ParentSelector: head1Selector.FillBytes(make([]byte, 32)),
}
engine.frameValidationCh <- newFrame
wg.Wait()
// Wait for the frame to be stored. This is a bit of a hack: masterTimeReel.Insert
// is called asynchronously from Start(), and Start() does not return a channel to
// wait on. Remove this time.Sleep once Start() is refactored.
time.Sleep(1 * time.Second)
newHeadFrame, err := masterTimeReel.Head()
assert.NoError(t, err)
assert.Equal(t, testFrameNumber, newHeadFrame.FrameNumber)
})
}
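
The time.Sleep flagged in the comment above could be replaced with a poll until the reel's head advances. A hypothetical helper along those lines; waitFor is not an existing project API, only a sketch of the idea:

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every tick until it returns true or the timeout elapses.
func waitFor(timeout, tick time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(tick)
	}
	return false
}

func main() {
	start := time.Now()
	// In the test, cond would check masterTimeReel.Head().FrameNumber instead.
	ok := waitFor(2*time.Second, 10*time.Millisecond, func() bool {
		return time.Since(start) > 50*time.Millisecond
	})
	fmt.Println("condition met:", ok)
}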

View File

@@ -90,40 +90,41 @@ func NewDataTimeReel(
initialProverKeys [][]byte,
alwaysSend bool,
) *DataTimeReel {
if filter == nil {
panic("filter is nil")
}
if logger == nil {
panic("logger is nil")
}
slogger := logger.With(zap.String("stage", "data-time-reel"))
if filter == nil {
slogger.Panic("filter is nil")
}
if clockStore == nil {
panic("clock store is nil")
slogger.Panic("clock store is nil")
}
if engineConfig == nil {
panic("engine config is nil")
slogger.Panic("engine config is nil")
}
if exec == nil {
panic("execution function is nil")
slogger.Panic("execution function is nil")
}
if frameProver == nil {
panic("frame prover is nil")
slogger.Panic("frame prover is nil")
}
cache, err := lru.New[string, string](10000)
if err != nil {
panic(err)
slogger.Panic("failed to create LRU cache", zap.Error(err))
}
ctx, cancel := context.WithCancel(context.Background())
return &DataTimeReel{
ctx: ctx,
cancel: cancel,
logger: logger.With(zap.String("stage", "data-time-reel")),
logger: slogger,
filter: filter,
engineConfig: engineConfig,
clockStore: clockStore,
@@ -148,15 +149,15 @@ func (d *DataTimeReel) createGenesisFrame() (
[]*tries.RollingFrecencyCritbitTrie,
) {
if d.origin == nil {
panic("origin is nil")
d.logger.Panic("origin is nil")
}
if d.initialInclusionProof == nil {
panic("initial inclusion proof is nil")
d.logger.Panic("initial inclusion proof is nil")
}
if d.initialProverKeys == nil {
panic("initial prover keys is nil")
d.logger.Panic("initial prover keys is nil")
}
difficulty := d.engineConfig.Difficulty
if difficulty == 0 || difficulty == 10000 {
@@ -170,15 +171,15 @@ func (d *DataTimeReel) createGenesisFrame() (
d.initialProverKeys,
)
if err != nil {
panic(err)
d.logger.Panic("failed to create genesis frame", zap.Error(err))
}
selector, err := frame.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get selector", zap.Error(err))
}
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
d.logger.Panic("failed to create transaction", zap.Error(err))
}
err = d.clockStore.StageDataClockFrame(
selector.FillBytes(make([]byte, 32)),
@@ -187,16 +188,16 @@ func (d *DataTimeReel) createGenesisFrame() (
)
if err != nil {
txn.Abort()
panic(err)
d.logger.Panic("failed to stage genesis frame", zap.Error(err))
}
err = txn.Commit()
if err != nil {
txn.Abort()
panic(err)
d.logger.Panic("failed to commit genesis frame", zap.Error(err))
}
txn, err = d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
d.logger.Panic("failed to create transaction", zap.Error(err))
}
if err := d.clockStore.CommitDataClockFrame(
d.filter,
@@ -206,10 +207,10 @@ func (d *DataTimeReel) createGenesisFrame() (
txn,
false,
); err != nil {
panic(err)
d.logger.Panic("failed to commit data clock frame", zap.Error(err))
}
if err := txn.Commit(); err != nil {
panic(err)
d.logger.Panic("failed to commit transaction", zap.Error(err))
}
return frame, tries
}
@@ -217,7 +218,7 @@ func (d *DataTimeReel) createGenesisFrame() (
func (d *DataTimeReel) Start() error {
frame, tries, err := d.clockStore.GetLatestDataClockFrame(d.filter)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
d.logger.Panic("failed to get latest data clock frame", zap.Error(err))
}
if frame == nil {
@@ -227,7 +228,7 @@ func (d *DataTimeReel) Start() error {
} else {
d.head = frame
if err != nil {
panic(err)
d.logger.Panic("failed to get latest data clock frame", zap.Error(err))
}
d.totalDistance = big.NewInt(0)
d.proverTries = tries
@@ -265,12 +266,12 @@ func (d *DataTimeReel) Insert(
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := frame.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get selector", zap.Error(err))
}
distance, err := d.GetDistance(frame)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
d.logger.Panic("failed to get distance", zap.Error(err))
}
d.storePending(selector, parent, distance, frame)
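
The constructor above sizes a generic LRU cache; assuming it comes from hashicorp/golang-lru/v2, whose New only fails for a non-positive size, a minimal usage sketch looks like this:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// New returns an error only when the size is not positive, which is why
	// a failure here is treated as fatal in the constructor.
	cache, err := lru.New[string, string](10000)
	if err != nil {
		panic(err)
	}
	cache.Add("parent-selector", "frame-selector")
	if v, ok := cache.Get("parent-selector"); ok {
		fmt.Println(v)
	}
}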

View File

@@ -40,28 +40,29 @@ func NewMasterTimeReel(
if logger == nil {
panic("logger is nil")
}
slogger := logger.With(zap.String("stage", "master-time-reel"))
if clockStore == nil {
panic("clock store is nil")
slogger.Panic("clock store is nil")
}
if engineConfig == nil {
panic("engine config is nil")
slogger.Panic("engine config is nil")
}
if frameProver == nil {
panic("frame prover is nil")
slogger.Panic("frame prover is nil")
}
filter, err := hex.DecodeString(
"0000000000000000000000000000000000000000000000000000000000000000",
)
if err != nil {
panic(err)
slogger.Panic("failed to decode filter", zap.Error(err))
}
return &MasterTimeReel{
logger: logger.With(zap.String("stage", "master-time-reel")),
logger: slogger,
filter: filter,
engineConfig: engineConfig,
clockStore: clockStore,
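
The hard-coded filter string above is 64 hex characters, i.e. a 32-byte all-zero bitmask once decoded; decoding can only fail if the constant itself is malformed. A quick check:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// The master filter: 64 hex zeros decode to 32 zero bytes.
	filter, err := hex.DecodeString(strings.Repeat("00", 32))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(filter)) // 32
}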

View File

@@ -148,10 +148,11 @@ func NewTokenExecutionEngine(
if logger == nil {
panic(errors.New("logger is nil"))
}
slogger := logger.With(zap.String("stage", "token-execution"))
seed, err := hex.DecodeString(cfg.Engine.GenesisSeed)
if err != nil {
panic(err)
slogger.Panic("failed to decode genesis seed", zap.Error(err))
}
intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
@@ -182,10 +183,10 @@ func NewTokenExecutionEngine(
if err := coinStore.SetMigrationVersion(
config.GetGenesis().GenesisSeedHex,
); err != nil {
panic(err)
slogger.Panic("failed to set migration version", zap.Error(err))
}
} else if err != nil {
panic(err)
slogger.Panic("failed to get data clock frame", zap.Error(err))
} else {
if pubSub.GetNetwork() == 0 {
err := coinStore.Migrate(
@@ -193,7 +194,7 @@ func NewTokenExecutionEngine(
config.GetGenesis().GenesisSeedHex,
)
if err != nil {
panic(err)
slogger.Panic("failed to migrate coins", zap.Error(err))
}
_, err = clockStore.GetEarliestDataClockFrame(intrinsicFilter)
if err != nil && errors.Is(err, store.ErrNotFound) {
@@ -216,29 +217,29 @@ func NewTokenExecutionEngine(
if len(peerSeniority) == 0 {
peerSeniority, err = clockStore.GetPeerSeniorityMap(intrinsicFilter)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
slogger.Panic("failed to get peer seniority map", zap.Error(err))
}
if len(peerSeniority) == 0 {
peerSeniority, err = RebuildPeerSeniority(uint(cfg.P2P.Network))
if err != nil {
panic(err)
slogger.Panic("failed to rebuild peer seniority", zap.Error(err))
}
txn, err := clockStore.NewTransaction(false)
if err != nil {
panic(err)
slogger.Panic("failed to create transaction", zap.Error(err))
}
err = clockStore.PutPeerSeniorityMap(txn, intrinsicFilter, peerSeniority)
if err != nil {
txn.Abort()
panic(err)
slogger.Panic("failed to put peer seniority map", zap.Error(err))
}
if err = txn.Commit(); err != nil {
txn.Abort()
panic(err)
slogger.Panic("failed to commit transaction", zap.Error(err))
}
}
} else {
@@ -249,7 +250,7 @@ func NewTokenExecutionEngine(
e := &TokenExecutionEngine{
ctx: ctx,
cancel: cancel,
logger: logger.With(zap.String("stage", "token-execution")),
logger: slogger,
engineConfig: cfg.Engine,
keyManager: keyManager,
clockStore: clockStore,
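
The seniority-map hunk follows the store's stage/commit discipline: abort the transaction on any failure before logging fatally. A hedged sketch with a hypothetical transaction interface; Txn, openTxn, and putMap are illustrative names, not the project's actual store API:

package main

import (
	"errors"
	"fmt"
)

// Txn is a stand-in for the clock store's transaction handle.
type Txn interface {
	Commit() error
	Abort()
}

type memTxn struct{}

func (memTxn) Commit() error { return nil }
func (memTxn) Abort()        {}

func openTxn() (Txn, error) { return memTxn{}, nil }

// putMap stands in for PutPeerSeniorityMap; it only validates its input here.
func putMap(txn Txn, m map[string]uint64) error {
	if m == nil {
		return errors.New("nil map")
	}
	return nil
}

func persist(m map[string]uint64) error {
	txn, err := openTxn()
	if err != nil {
		return err
	}
	if err := putMap(txn, m); err != nil {
		txn.Abort() // release the transaction before reporting the fatal error
		return err
	}
	if err := txn.Commit(); err != nil {
		txn.Abort()
		return err
	}
	return nil
}

func main() {
	fmt.Println(persist(map[string]uint64{"peer": 1}))
}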

View File

@@ -499,7 +499,7 @@ func RebuildPeerSeniority(network uint) (map[string]uint64, error) {
// Creates a genesis state for the intrinsic
func CreateGenesisState(
logger *zap.Logger,
parentLogger *zap.Logger,
engineConfig *config.EngineConfig,
testProverKeys [][]byte,
inclusionProver qcrypto.InclusionProver,
@@ -515,7 +515,7 @@
[][]byte,
map[string]uint64,
) {
logger = logger.With(zap.String("stage", "create-genesis-state"))
logger := parentLogger.With(zap.String("stage", "create-genesis-state"))
genesis := config.GetGenesis()
if genesis == nil {
logger.Panic("genesis is nil")

View File

@@ -59,11 +59,11 @@ func (pm *peerMonitor) run(ctx context.Context, logger *zap.Logger) {
logger.Debug("pinging connected peers", zap.Int("peer_count", len(peers)))
wg := &sync.WaitGroup{}
for _, id := range peers {
logger := logger.With(zap.String("peer_id", id.String()))
slogger := logger.With(zap.String("peer_id", id.String()))
for _, conn := range pm.h.Network().ConnsToPeer(id) {
logger := logger.With(zap.String("connection_id", conn.ID()))
sslogger := slogger.With(zap.String("connection_id", conn.ID()))
wg.Add(1)
go pm.ping(ctx, logger, wg, conn)
go pm.ping(ctx, sslogger, wg, conn)
}
}
wg.Wait()
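
The peer-monitor hunk renames the nested derived loggers so they no longer shadow the outer logger variable. A simplified sketch of the same structure, with placeholder peer and connection values:

package main

import "go.uber.org/zap"

func pingAll(logger *zap.Logger, peers map[string][]string) {
	for peerID, conns := range peers {
		// One derived logger per peer, another per connection; distinct names
		// avoid shadowing the outer logger.
		slogger := logger.With(zap.String("peer_id", peerID))
		for _, connID := range conns {
			sslogger := slogger.With(zap.String("connection_id", connID))
			sslogger.Debug("pinging connection")
		}
	}
}

func main() {
	l, _ := zap.NewDevelopment()
	pingAll(l, map[string][]string{"peer-a": {"conn-1", "conn-2"}})
}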