improve logging (#426)

This commit is contained in:
Jiabao Qu 2025-04-06 10:22:48 +08:00 committed by GitHub
parent f2f3c90a1f
commit 0966faee9f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
40 changed files with 470 additions and 401 deletions

View File

@ -17,6 +17,7 @@ import (
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/term"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
@ -24,6 +25,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/config"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
var (
@ -434,6 +436,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
func (m model) View() string {
logger := utils.GetLogger().With(zap.String("stage", "db-console-view-model"))
physicalWidth, physicalHeight, _ := term.GetSize(int(os.Stdout.Fd()))
doc := strings.Builder{}
@ -510,7 +513,7 @@ func (m model) View() string {
} else if m.frame != nil {
selector, err := m.frame.GetSelector()
if err != nil {
panic(err)
logger.Panic("error getting selector", zap.Error(err))
}
committed := "Unconfirmed"
if m.committed {
@ -573,20 +576,21 @@ func consoleModel(
nodeConfig *config.Config,
grpcWarn bool,
) model {
logger := utils.GetLogger()
peerPrivKey, err := hex.DecodeString(nodeConfig.P2P.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error decode peer private key", zap.Error(err))
}
privKey, err := crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
pub := privKey.GetPublic()
id, err := peer.IDFromPublicKey(pub)
if err != nil {
panic(errors.Wrap(err, "error getting peer id"))
logger.Panic("error getting peer id", zap.Error(err))
}
return model{
@ -612,16 +616,17 @@ var defaultGrpcAddress = "localhost:8337"
// Connect to the node via GRPC
func ConnectToNode(nodeConfig *config.Config) (*grpc.ClientConn, error) {
logger := utils.GetLogger().With(zap.String("stage", "db-console-connect-to-node"))
addr := defaultGrpcAddress
if nodeConfig.ListenGRPCMultiaddr != "" {
ma, err := multiaddr.NewMultiaddr(nodeConfig.ListenGRPCMultiaddr)
if err != nil {
panic(err)
logger.Panic("error parsing multiaddr", zap.Error(err))
}
_, addr, err = mn.DialArgs(ma)
if err != nil {
panic(err)
logger.Panic("error getting dial args", zap.Error(err))
}
}
@ -673,9 +678,10 @@ func FetchNodeInfo(client protobufs.NodeServiceClient) (*protobufs.NodeInfoRespo
// Runs the DB console
func (c *DBConsole) Run() {
logger := utils.GetLogger().With(zap.String("stage", "db-console-run"))
conn, err := ConnectToNode(c.nodeConfig)
if err != nil {
panic(err)
logger.Panic("error connecting to node", zap.Error(err))
}
defer conn.Close()
@ -683,7 +689,7 @@ func (c *DBConsole) Run() {
p := tea.NewProgram(consoleModel(conn, c.nodeConfig, grpcWarn))
if _, err := p.Run(); err != nil {
panic(err)
logger.Panic("error running program", zap.Error(err))
}
}

View File

@ -17,6 +17,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/rpc"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type NodeMode string
@ -136,19 +137,21 @@ func nearestApplicablePowerOfTwo(number uint64) uint64 {
}
func (n *Node) VerifyProofIntegrity() {
logger := utils.GetLogger().With(zap.String("stage", "verify-proof-integrity"))
i, _, _, e := n.dataProofStore.GetLatestDataTimeProof(n.pubSub.GetPeerID())
if e != nil {
panic(e)
logger.Panic("failed to get latest data time proof", zap.Error(e))
}
dataProver := crypto.NewKZGInclusionProver(n.logger)
wesoProver := crypto.NewWesolowskiFrameProver(n.logger)
for j := int(i); j >= 0; j-- {
fmt.Println(j)
loggerWithIncrement := logger.With(zap.Int("increment", j))
loggerWithIncrement.Info("verifying proof")
_, parallelism, input, o, err := n.dataProofStore.GetDataTimeProof(n.pubSub.GetPeerID(), uint32(j))
if err != nil {
panic(err)
loggerWithIncrement.Panic("failed to get data time proof", zap.Error(err))
}
idx, idxProof, idxCommit, idxKP := GetOutputs(o)
@ -162,19 +165,19 @@ func (n *Node) VerifyProofIntegrity() {
nearestApplicablePowerOfTwo(uint64(parallelism)),
)
if err != nil {
panic(err)
loggerWithIncrement.Panic("failed to verify kzg proof", zap.Error(err))
}
if !v {
panic(fmt.Sprintf("bad kzg proof at increment %d", j))
loggerWithIncrement.Panic("bad kzg proof")
}
wp := []byte{}
wp = append(wp, n.pubSub.GetPeerID()...)
wp = append(wp, input...)
fmt.Printf("%x\n", wp)
loggerWithIncrement.Info("build weso proof", zap.String("wp", fmt.Sprintf("%x", wp)))
v = wesoProver.VerifyPreDuskChallengeProof(wp, uint32(j), idx, idxProof)
if !v {
panic(fmt.Sprintf("bad weso proof at increment %d", j))
loggerWithIncrement.Panic("bad weso proof")
}
}
}
@ -190,11 +193,12 @@ func (d *DHTNode) Stop() {
}
func (n *Node) Start() {
logger := utils.GetLogger()
switch n.mode {
case NormalNodeMode:
err := <-n.engine.Start()
if err != nil {
panic(err)
logger.Panic("failed to start engine", zap.Error(err))
}
// TODO: add config mapping to engine name/frame registration
@ -204,7 +208,7 @@ func (n *Node) Start() {
go func(e execution.ExecutionEngine) {
defer wg.Done()
if err := <-n.engine.RegisterExecutor(e, 0); err != nil {
panic(err)
logger.Panic("failed to register executor", zap.Error(err))
}
}(e)
}
@ -215,11 +219,12 @@ func (n *Node) Start() {
}
func (n *Node) Stop() {
logger := utils.GetLogger()
switch n.mode {
case NormalNodeMode:
err := <-n.engine.Stop(false)
if err != nil {
panic(err)
logger.Panic("failed to stop engine", zap.Error(err))
}
case StrictSyncNodeMode:
n.synchronizer.Stop()

View File

@ -18,8 +18,10 @@ import (
"github.com/cloudflare/circl/sign/ed448"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"gopkg.in/yaml.v2"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type GRPCMessageLimitsConfig struct {
@ -226,9 +228,10 @@ func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) {
return unlock, nil
}
logger := utils.GetLogger().With(zap.String("stage", "download-and-verify-genesis"))
resp, err := http.Get("https://releases.quilibrium.com/genesisunlock")
if err != nil || resp.StatusCode != 200 {
fmt.Println("Stasis lock not yet released.")
logger.Warn("Stasis lock not yet released.")
return nil, errors.New("stasis lock not yet released")
}
defer resp.Body.Close()
@ -273,14 +276,14 @@ func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) {
opensslMsg := "SHA3-256(genesis)= " + hex.EncodeToString(digest[:])
if !ed448.Verify(pubkey, append([]byte(opensslMsg), 0x0a), sig, "") {
fmt.Printf("Failed signature check for signatory #%d\n", i)
logger.Warn("Failed signature check for signatory", zap.Int("signatory", i))
return nil, errors.New("failed signature check")
}
count++
}
if count < ((len(Signatories)-4)/2)+((len(Signatories)-4)%2) {
fmt.Printf("Quorum on signatures not met")
logger.Warn("Quorum on signatures not met")
return nil, errors.New("quorum on signatures not met")
}
@ -298,19 +301,20 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
*Config,
error,
) {
logger := utils.GetLogger().With(zap.String("stage", "load-config"))
info, err := os.Stat(configPath)
if os.IsNotExist(err) {
fmt.Println("Creating config directory " + configPath)
logger.Info("Creating config directory", zap.String("path", configPath))
if err = os.Mkdir(configPath, fs.FileMode(0700)); err != nil {
panic(err)
logger.Panic("Failed to create config directory", zap.Error(err))
}
} else {
if err != nil {
panic(err)
logger.Panic("Failed to stat config directory", zap.Error(err))
}
if !info.IsDir() {
panic(configPath + " is not a directory")
logger.Panic("Config path is not a directory", zap.String("path", configPath))
}
}
@ -359,24 +363,24 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
}
if saveDefaults {
fmt.Println("Generating default config...")
fmt.Println("Generating random host key...")
logger.Info("Generating default config...")
logger.Info("Generating random host key...")
privkey, _, err := crypto.GenerateEd448Key(rand.Reader)
if err != nil {
panic(err)
logger.Panic("Failed to generate host key", zap.Error(err))
}
hostKey, err := privkey.Raw()
if err != nil {
panic(err)
logger.Panic("Failed to get raw host key", zap.Error(err))
}
config.P2P.PeerPrivKey = hex.EncodeToString(hostKey)
fmt.Println("Generating keystore key...")
logger.Info("Generating keystore key...")
keystoreKey := make([]byte, 32)
if _, err := rand.Read(keystoreKey); err != nil {
panic(err)
logger.Panic("Failed to generate keystore key", zap.Error(err))
}
config.Key.KeyStoreFile.EncryptionKey = hex.EncodeToString(keystoreKey)
@ -390,9 +394,9 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
config.Engine.StatsMultiaddr = multiAddr
}
fmt.Println("Saving config...")
logger.Info("Saving config to", zap.String("path", configPath))
if err = SaveConfig(configPath, config); err != nil {
panic(err)
logger.Panic("Failed to save config", zap.Error(err))
}
keyfile, err := os.OpenFile(
@ -401,13 +405,13 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
fs.FileMode(0600),
)
if err != nil {
panic(err)
logger.Panic("Failed to open key file", zap.Error(err))
}
if proverKey != "" {
provingKey, err := hex.DecodeString(proverKey)
if err != nil {
panic(err)
logger.Panic("Failed to decode proving key", zap.Error(err))
}
iv := [12]byte{}
@ -440,7 +444,7 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
if file, err = os.Open(
filepath.Join(configPath, "config.yml"),
); err != nil {
panic(err)
logger.Panic("Failed to open config file", zap.Error(err))
}
}

View File

@ -11,6 +11,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/consensus/data/internal"
"source.quilibrium.com/quilibrium/monorepo/node/internal/frametime"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/peer"
@ -455,6 +456,7 @@ func (e *DataClockConsensusEngine) syncWithPeer(
func (e *DataClockConsensusEngine) initiateProvers(
latestFrame *protobufs.ClockFrame,
) {
logger := utils.GetLogger()
if latestFrame.Timestamp > time.Now().UnixMilli()-60000 {
if !e.IsInProverTrie(e.pubSub.GetPeerID()) {
e.logger.Info("announcing prover join")
@ -470,7 +472,7 @@ func (e *DataClockConsensusEngine) initiateProvers(
h, err := poseidon.HashBytes(e.pubSub.GetPeerID())
if err != nil {
panic(err)
logger.Panic("could not hash peer id", zap.Error(err))
}
peerProvingKeyAddress := h.FillBytes(make([]byte, 32))

View File

@ -247,7 +247,7 @@ func NewDataClockConsensusEngine(
ctx: ctx,
cancel: cancel,
difficulty: difficulty,
logger: logger,
logger: logger.With(zap.String("stage", "data-clock-consensus")),
state: consensus.EngineStateStopped,
clockStore: clockStore,
coinStore: coinStore,
@ -326,14 +326,14 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
e.logger.Info("loading last seen state")
err := e.dataTimeReel.Start()
if err != nil {
panic(err)
e.logger.Panic("error starting data time reel", zap.Error(err))
}
e.frameProverTries = e.dataTimeReel.GetFrameProverTries()
err = e.createCommunicationKeys()
if err != nil {
panic(err)
e.logger.Panic("error creating communication keys", zap.Error(err))
}
e.wg.Add(4)
@ -398,7 +398,7 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
var currentBackoff = 0
lastHead, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("error getting head", zap.Error(err))
}
source := rand.New(rand.NewSource(rand.Int63()))
for {
@ -416,7 +416,7 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
}
currentHead, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("error getting head", zap.Error(err))
}
if currentHead.FrameNumber == lastHead.FrameNumber {
currentBackoff = min(maxBackoff, currentBackoff+1)
@ -434,12 +434,12 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
thresholdBeforeConfirming := 4
frame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("error getting head", zap.Error(err))
}
for {
nextFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("error getting head", zap.Error(err))
}
if frame.FrameNumber-100 >= nextFrame.FrameNumber ||
@ -681,10 +681,10 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error {
FrameNumber: e.GetFrame().FrameNumber,
}
if err := pause.SignED448(e.pubSub.GetPublicKey(), e.pubSub.SignMessage); err != nil {
panic(err)
e.logger.Panic("error signing prover pause", zap.Error(err))
}
if err := pause.Validate(); err != nil {
panic(err)
e.logger.Panic("error validating prover pause", zap.Error(err))
}
if err := e.publishMessage(e.txFilter, pause.TokenRequest()); err != nil {
@ -699,7 +699,7 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error {
defer wg.Done()
frame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("error getting head", zap.Error(err))
}
err = <-e.UnregisterExecutor(name, frame.FrameNumber, force)

View File

@ -17,7 +17,7 @@ func (e *DataClockConsensusEngine) RegisterExecutor(
for {
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head frame", zap.Error(err))
}
logger.Info(
@ -59,7 +59,7 @@ func (e *DataClockConsensusEngine) UnregisterExecutor(
for {
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head frame", zap.Error(err))
}
logger.Info(

View File

@ -16,6 +16,7 @@ import (
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
@ -237,7 +238,7 @@ func (e *DataClockConsensusEngine) applySnapshot(
false,
)
if err != nil {
fmt.Println("not found", max.FrameNumber+1)
e.logger.Error("snapshot not found", zap.Error(err), zap.Uint64("frame", max.FrameNumber+1))
temporaryStore.Close()
return errors.Wrap(
err,

View File

@ -26,12 +26,12 @@ func (
newTrie := &tries.RollingFrecencyCritbitTrie{}
b, err := trie.Serialize()
if err != nil {
panic(err)
e.logger.Panic("failed to serialize trie", zap.Error(err))
}
err = newTrie.Deserialize(b)
if err != nil {
panic(err)
e.logger.Panic("failed to deserialize trie", zap.Error(err))
}
frameProverTries[i] = newTrie
}
@ -49,10 +49,10 @@ func (e *DataClockConsensusEngine) GetFrameProverTrie(i int) *tries.RollingFrece
}
b, err := e.frameProverTries[i].Serialize()
if err != nil {
panic(err)
e.logger.Panic("failed to serialize trie", zap.Error(err))
}
if err := newTrie.Deserialize(b); err != nil {
panic(err)
e.logger.Panic("failed to deserialize trie", zap.Error(err))
}
return newTrie
}
@ -112,7 +112,7 @@ outer:
case <-time.After(1 * time.Minute):
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head frame", zap.Error(err))
}
if head.FrameNumber <= maxFrames ||
@ -193,14 +193,14 @@ func (e *DataClockConsensusEngine) runLoop() {
} else {
latestFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head frame", zap.Error(err))
}
if runOnce {
if e.FrameProverTrieContains(0, e.provingKeyAddress) {
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head frame", zap.Error(err))
}
latestFrame = e.processFrame(latestFrame, dataFrame)

View File

@ -17,6 +17,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
func (e *DataClockConsensusEngine) runFrameMessageHandler() {
@ -255,7 +256,7 @@ func (e *DataClockConsensusEngine) handleClockFrame(
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head frame", zap.Error(err))
}
if frame.FrameNumber > head.FrameNumber {
@ -475,7 +476,9 @@ func TokenRequestIdentifiers(transition *protobufs.TokenRequest) []string {
case *protobufs.TokenRequest_Resume:
return []string{fmt.Sprintf("resume-%x", t.Resume.GetPublicKeySignatureEd448().PublicKey.KeyValue)}
default:
panic("unhandled transition type")
utils.GetLogger().Panic("unhandled token request type",
zap.String("transitionRequestType", fmt.Sprintf("%T", t)))
panic("unhandled token request type")
}
}

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/peer"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@ -115,7 +116,7 @@ func (e *DataClockConsensusEngine) validateTxMessage(peerID peer.ID, message *pb
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
if frameNumber+1 < head.FrameNumber {
return p2p.ValidationResultIgnore

View File

@ -23,22 +23,19 @@ func (e *DataClockConsensusEngine) GetProvingKey(
}
if err != nil {
e.logger.Error("could not get proving key", zap.Error(err))
panic(err)
e.logger.Panic("could not get proving key", zap.Error(err))
}
rawKey, err := e.keyManager.GetRawKey(engineConfig.ProvingKeyId)
if err != nil {
e.logger.Error("could not get proving key type", zap.Error(err))
panic(err)
e.logger.Panic("could not get proving key type", zap.Error(err))
}
provingKeyType := rawKey.Type
h, err := poseidon.HashBytes(rawKey.PublicKey)
if err != nil {
e.logger.Error("could not hash proving key", zap.Error(err))
panic(err)
e.logger.Panic("could not hash proving key", zap.Error(err))
}
provingKeyAddress := h.Bytes()

View File

@ -53,7 +53,7 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
head, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
if frame.FrameNumber < head.FrameNumber {

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -44,7 +45,7 @@ func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
) {
frame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
// Needs to be enough to make the sync worthwhile:
@ -74,7 +75,7 @@ func (e *MasterClockConsensusEngine) collect(
) (*protobufs.ClockFrame, error) {
latest, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("failed to get head", zap.Error(err))
}
return latest, nil

View File

@ -17,7 +17,7 @@ func (e *MasterClockConsensusEngine) RegisterExecutor(
go func() {
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
logger.Panic("could not get master time reel head", zap.Error(err))
}
logger.Info(
@ -34,7 +34,7 @@ func (e *MasterClockConsensusEngine) RegisterExecutor(
for {
masterFrame, err = e.masterTimeReel.Head()
if err != nil {
panic(err)
logger.Panic("could not get master time reel head", zap.Error(err))
}
logger.Info(
@ -86,7 +86,7 @@ func (e *MasterClockConsensusEngine) UnregisterExecutor(
for {
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
logger.Panic("could not get master time reel head", zap.Error(err))
}
logger.Info(

View File

@ -119,7 +119,7 @@ func NewMasterClockConsensusEngine(
e := &MasterClockConsensusEngine{
difficulty: MASTER_CLOCK_RATE,
logger: logger,
logger: logger.With(zap.String("stage", "master-clock-consensus")),
state: consensus.EngineStateStopped,
keyManager: keyManager,
pubSub: pubSub,

View File

@ -32,7 +32,7 @@ func (e *MasterClockConsensusEngine) Sync(
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
e.logger.Panic("could not get master time reel head", zap.Error(err))
}
if masterFrame.FrameNumber < from {

View File

@ -123,7 +123,7 @@ func NewDataTimeReel(
return &DataTimeReel{
ctx: ctx,
cancel: cancel,
logger: logger,
logger: logger.With(zap.String("stage", "data-time-reel")),
filter: filter,
engineConfig: engineConfig,
clockStore: clockStore,
@ -412,7 +412,7 @@ func (d *DataTimeReel) storePending(
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
d.logger.Panic("failed to create transaction", zap.Error(err))
}
err = d.clockStore.StageDataClockFrame(
selector.FillBytes(make([]byte, 32)),
@ -421,10 +421,10 @@ func (d *DataTimeReel) storePending(
)
if err != nil {
txn.Abort()
panic(err)
d.logger.Panic("failed to stage data clock frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
d.logger.Panic("failed to commit transaction", zap.Error(err))
}
}
}
@ -453,7 +453,7 @@ func (d *DataTimeReel) runLoop() {
if frameDone != nil {
close(frameDone)
}
panic(err)
d.logger.Panic("failed to get staged data clock frame", zap.Error(err))
}
d.logger.Info(
@ -472,7 +472,7 @@ func (d *DataTimeReel) runLoop() {
if frameDone != nil {
close(frameDone)
}
panic(err)
d.logger.Panic("failed to get distance", zap.Error(err))
}
}
@ -601,7 +601,7 @@ func (d *DataTimeReel) processPending(
headSelector, err := d.head.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get head selector", zap.Error(err))
}
selectorHex := hex.EncodeToString(headSelector.Bytes())
@ -654,7 +654,7 @@ func (d *DataTimeReel) tryFillGap(startFrame, endFrame uint64) bool {
headSelector, err := d.head.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get head selector", zap.Error(err))
}
currentSelector := headSelector
@ -741,7 +741,7 @@ func (d *DataTimeReel) tryFillGap(startFrame, endFrame uint64) bool {
currentSelector, err = rawFrame.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get selector", zap.Error(err))
}
}
@ -794,7 +794,7 @@ func (d *DataTimeReel) processPendingChain() {
headSelector, err := d.head.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get head selector", zap.Error(err))
}
var bestFrame *protobufs.ClockFrame
@ -900,7 +900,7 @@ func (d *DataTimeReel) setHead(
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
d.logger.Panic("failed to create transaction", zap.Error(err))
}
d.logger.Info(
@ -911,7 +911,7 @@ func (d *DataTimeReel) setHead(
selector, err := frame.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get selector", zap.Error(err))
}
_, tries, err := d.clockStore.GetDataClockFrame(
@ -937,11 +937,11 @@ func (d *DataTimeReel) setHead(
txn,
false,
); err != nil {
panic(err)
d.logger.Panic("failed to commit data clock frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
d.logger.Panic("failed to commit transaction", zap.Error(err))
}
d.proverTries = tries
@ -993,7 +993,7 @@ func (d *DataTimeReel) forkChoice(
)
_, selector, err := frame.GetParentAndSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get parent and selector", zap.Error(err))
}
leftIndex := d.head
@ -1028,7 +1028,7 @@ func (d *DataTimeReel) forkChoice(
d.logger.Info("cannot verify lineage, aborting fork choice")
return
} else {
panic(err)
d.logger.Panic("failed to get staged data clock frame", zap.Error(err))
}
}
@ -1036,7 +1036,7 @@ func (d *DataTimeReel) forkChoice(
rightIndexDistance, err := d.GetDistance(rightIndex)
if err != nil {
panic(err)
d.logger.Panic("failed to get distance", zap.Error(err))
}
// We accumulate right on left when right is longer because we cannot know
@ -1077,7 +1077,7 @@ func (d *DataTimeReel) forkChoice(
),
zap.Uint64("frame_number", leftIndex.FrameNumber-1),
)
panic(err)
d.logger.Panic("failed to get staged data clock frame", zap.Error(err))
}
rightIndex, err = d.clockStore.GetStagedDataClockFrame(
@ -1092,7 +1092,7 @@ func (d *DataTimeReel) forkChoice(
d.logger.Info("cannot verify full lineage, aborting fork choice")
return
} else {
panic(err)
d.logger.Panic("failed to get staged data clock frame", zap.Error(err))
}
}
@ -1101,12 +1101,12 @@ func (d *DataTimeReel) forkChoice(
leftIndexDistance, err := d.GetDistance(leftIndex)
if err != nil {
panic(err)
d.logger.Panic("failed to get distance", zap.Error(err))
}
rightIndexDistance, err := d.GetDistance(rightIndex)
if err != nil {
panic(err)
d.logger.Panic("failed to get distance", zap.Error(err))
}
leftTotal.Add(leftTotal, leftIndexDistance)
@ -1142,7 +1142,7 @@ func (d *DataTimeReel) forkChoice(
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
d.logger.Panic("failed to create transaction", zap.Error(err))
}
if err := d.clockStore.CommitDataClockFrame(
@ -1153,11 +1153,11 @@ func (d *DataTimeReel) forkChoice(
txn,
rightIndex.FrameNumber < d.head.FrameNumber,
); err != nil {
panic(err)
d.logger.Panic("failed to commit data clock frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
d.logger.Panic("failed to commit transaction", zap.Error(err))
}
frameNumber++
@ -1165,7 +1165,7 @@ func (d *DataTimeReel) forkChoice(
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
d.logger.Panic("failed to create transaction", zap.Error(err))
}
if err := d.clockStore.CommitDataClockFrame(
@ -1176,11 +1176,11 @@ func (d *DataTimeReel) forkChoice(
txn,
false,
); err != nil {
panic(err)
d.logger.Panic("failed to commit data clock frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
d.logger.Panic("failed to commit transaction", zap.Error(err))
}
d.head = frame
@ -1210,7 +1210,7 @@ func (d *DataTimeReel) forkChoice(
func (d *DataTimeReel) getTotalDistance(frame *protobufs.ClockFrame) *big.Int {
selector, err := frame.GetSelector()
if err != nil {
panic(err)
d.logger.Panic("failed to get selector", zap.Error(err))
}
existingTotal, err := d.clockStore.GetTotalDistance(

View File

@ -61,7 +61,7 @@ func NewMasterTimeReel(
}
return &MasterTimeReel{
logger: logger,
logger: logger.With(zap.String("stage", "master-time-reel")),
filter: filter,
engineConfig: engineConfig,
clockStore: clockStore,
@ -79,13 +79,13 @@ func (m *MasterTimeReel) Start() error {
m.logger.Debug("starting master time reel")
frame, err := m.clockStore.GetLatestMasterClockFrame(m.filter)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
m.logger.Panic("failed to get latest master clock frame", zap.Error(err))
}
m.logger.Debug("fetching genesis frame")
genesis, err := m.clockStore.GetMasterClockFrame(m.filter, 0)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
m.logger.Panic("failed to get genesis frame", zap.Error(err))
}
rebuildGenesisFrame := false
@ -94,7 +94,7 @@ func (m *MasterTimeReel) Start() error {
err = m.clockStore.ResetMasterClockFrames(m.filter)
if err != nil {
panic(err)
m.logger.Panic("failed to reset master clock frames", zap.Error(err))
}
rebuildGenesisFrame = true
@ -148,7 +148,7 @@ func (m *MasterTimeReel) Stop() {
func (m *MasterTimeReel) createGenesisFrame() *protobufs.ClockFrame {
seed, err := hex.DecodeString(m.engineConfig.GenesisSeed)
if err != nil {
panic(errors.New("genesis seed is nil"))
m.logger.Panic("failed to decode genesis seed, genesis seed is nil", zap.Error(err))
}
difficulty := m.engineConfig.Difficulty
@ -162,20 +162,20 @@ func (m *MasterTimeReel) createGenesisFrame() *protobufs.ClockFrame {
difficulty,
)
if err != nil {
panic(err)
m.logger.Panic("failed to create genesis frame", zap.Error(err))
}
txn, err := m.clockStore.NewTransaction(false)
if err != nil {
panic(err)
m.logger.Panic("failed to create transaction", zap.Error(err))
}
if err = m.clockStore.PutMasterClockFrame(frame, txn); err != nil {
panic(err)
m.logger.Panic("failed to put genesis frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
m.logger.Panic("failed to commit transaction", zap.Error(err))
}
return frame
@ -195,7 +195,7 @@ func (m *MasterTimeReel) runLoop() {
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := m.head.GetSelector()
if err != nil {
panic(err)
m.logger.Panic("failed to get selector", zap.Error(err))
}
// master frames cannot fork, this is invalid
@ -215,15 +215,15 @@ func (m *MasterTimeReel) runLoop() {
txn, err := m.clockStore.NewTransaction(false)
if err != nil {
panic(err)
m.logger.Panic("failed to create transaction", zap.Error(err))
}
if err := m.clockStore.PutMasterClockFrame(frame, txn); err != nil {
panic(err)
m.logger.Panic("failed to put frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
m.logger.Panic("failed to commit transaction", zap.Error(err))
}
m.head = frame
@ -264,7 +264,7 @@ func (m *MasterTimeReel) processPending() {
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := m.head.GetSelector()
if err != nil {
panic(err)
m.logger.Panic("failed to get selector", zap.Error(err))
}
// master frames cannot fork, this is invalid
@ -284,15 +284,15 @@ func (m *MasterTimeReel) processPending() {
txn, err := m.clockStore.NewTransaction(false)
if err != nil {
panic(err)
m.logger.Panic("failed to create transaction", zap.Error(err))
}
if err := m.clockStore.PutMasterClockFrame(frame, txn); err != nil {
panic(err)
m.logger.Panic("failed to put frame", zap.Error(err))
}
if err = txn.Commit(); err != nil {
panic(err)
m.logger.Panic("failed to commit transaction", zap.Error(err))
}
m.head = frame

View File

@ -9,7 +9,6 @@ import (
"crypto/subtle"
"encoding/binary"
"encoding/json"
"fmt"
"sort"
"github.com/pkg/errors"
@ -17,6 +16,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
const TRIPLE_RATCHET_PROTOCOL_VERSION = 1
@ -651,7 +651,7 @@ func (r *TripleRatchetParticipant) decryptHeader(
); err != nil {
return nil, false, errors.Wrap(err, "could not decrypt header")
}
fmt.Println("should ratchet")
utils.GetLogger().Info("should ratchet")
return header, true, nil
}

View File

@ -12,9 +12,11 @@ import (
"sync"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/internal/runtime"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type ShardKey struct {
@ -82,7 +84,7 @@ func (n *LazyVectorCommitmentLeafNode) Commit(
path,
n,
); err != nil {
panic(err)
utils.GetLogger().Panic("failed to insert node", zap.Error(err))
}
}
return n.Commitment
@ -129,6 +131,7 @@ func commitNode(
recalculate bool,
throttle chan struct{},
) []byte {
logger := utils.GetLogger().With(zap.String("stage", "lazy-proof-tree-commit"))
switch node := n.(type) {
case *LazyVectorCommitmentBranchNode:
if len(node.Commitment) != 0 && !recalculate {
@ -157,7 +160,7 @@ func commitNode(
childPath,
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
logger.Panic("failed to get node by path", zap.Error(err))
}
}
if child != nil {
@ -195,7 +198,7 @@ func commitNode(
childPath,
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
logger.Panic("failed to get node by path", zap.Error(err))
}
}
if child != nil {
@ -242,7 +245,7 @@ func commitNode(
path,
node,
); err != nil {
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return node.Commitment
case *LazyVectorCommitmentLeafNode:
@ -255,7 +258,7 @@ func commitNode(
func (n *LazyVectorCommitmentBranchNode) Verify(index int, proof []byte) bool {
data := []byte{}
if len(n.Commitment) == 0 {
panic("verify cannot be run on nil commitments")
utils.GetLogger().Panic("verify cannot be run on nil commitments")
} else {
child := n.Children[index]
if child != nil {
@ -372,6 +375,7 @@ func (t *LazyVectorCommitmentTree) Insert(
key, value, hashTarget []byte,
size *big.Int,
) error {
logger := utils.GetLogger().With(zap.String("stage", "lazy-proof-tree-insert"))
if len(key) == 0 {
return errors.New("empty key not allowed")
}
@ -395,7 +399,7 @@ func (t *LazyVectorCommitmentTree) Insert(
path,
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
logger.Panic("failed to get node by path", zap.Error(err))
}
}
if node == nil {
@ -418,7 +422,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return 1, newNode
} else {
@ -433,7 +437,7 @@ func (t *LazyVectorCommitmentTree) Insert(
slices.Concat(path, []int{i}),
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
logger.Panic("failed to get node by path", zap.Error(err))
}
}
branch.FullyLoaded = true
@ -459,7 +463,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return 0, n
}
@ -501,7 +505,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
err = t.Store.InsertNode(
@ -515,7 +519,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
err = t.Store.InsertNode(
@ -529,7 +533,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return 1, branch
@ -572,7 +576,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
n.FullPrefix = slices.Concat(
@ -593,7 +597,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
err = t.Store.InsertNode(
@ -607,7 +611,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return 1, newBranch
@ -649,7 +653,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return delta, n
@ -685,7 +689,7 @@ func (t *LazyVectorCommitmentTree) Insert(
)
if err != nil {
// todo: no panic
panic(err)
logger.Panic("failed to insert node", zap.Error(err))
}
return delta, n
@ -860,7 +864,7 @@ func (t *LazyVectorCommitmentTree) Commit(recalculate bool) []byte {
err := t.Store.SaveRoot(t.SetType, t.PhaseType, t.ShardKey, t.Root)
if err != nil {
panic(err)
utils.GetLogger().Panic("failed to save root", zap.Error(err))
}
return commitment

View File

@ -5,6 +5,9 @@ import (
"fmt"
"slices"
"strings"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
// CompareTreesAtHeight compares two vector commitment trees at each level
@ -67,6 +70,7 @@ func compareLevelCommits(
node1, node2 LazyVectorCommitmentNode,
targetHeight, currentHeight int,
) []ComparisonResult {
logger := utils.GetLogger()
if node1 == nil && node2 == nil {
return nil
}
@ -82,7 +86,7 @@ func compareLevelCommits(
} else if bok {
commit1 = branch1.Commitment
} else {
panic("invalid node type")
logger.Panic("invalid node type")
}
}
if node2 != nil {
@ -93,7 +97,7 @@ func compareLevelCommits(
} else if bok {
commit2 = branch2.Commitment
} else {
panic("invalid node type")
logger.Panic("invalid node type")
}
}
@ -332,7 +336,7 @@ func GetAllLeaves(
slices.Concat(n.FullPrefix, []int{i}),
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
utils.GetLogger().Panic("failed to get node by path", zap.Error(err))
}
}
if child != nil {

View File

@ -301,7 +301,7 @@ func (w *WesolowskiFrameProver) CreateDataGenesisFrame(
for i, s := range proverKeys {
addr, err := poseidon.HashBytes(s)
if err != nil {
panic(err)
w.logger.Panic("could not hash proving key", zap.Error(err))
}
addrBytes := addr.Bytes()

View File

@ -249,7 +249,7 @@ func NewTokenExecutionEngine(
e := &TokenExecutionEngine{
ctx: ctx,
cancel: cancel,
logger: logger,
logger: logger.With(zap.String("stage", "token-execution")),
engineConfig: cfg.Engine,
keyManager: keyManager,
clockStore: clockStore,
@ -329,7 +329,7 @@ func NewTokenExecutionEngine(
peerId := e.pubSub.GetPeerID()
addr, err := poseidon.HashBytes(peerId)
if err != nil {
panic(err)
e.logger.Panic("could not hash peer id", zap.Error(err))
}
addrBytes := addr.FillBytes(make([]byte, 32))
@ -347,7 +347,7 @@ func NewTokenExecutionEngine(
[]byte{0xff},
)
if err != nil {
panic(err)
e.logger.Panic("could not get range coins", zap.Error(err))
}
totalCoins := 0
@ -360,11 +360,11 @@ func NewTokenExecutionEngine(
}
start, err := hex.DecodeString(e.engineConfig.RebuildStart)
if err != nil {
panic(err)
e.logger.Panic("could not decode start", zap.Error(err))
}
end, err := hex.DecodeString(e.engineConfig.RebuildEnd)
if err != nil {
panic(err)
e.logger.Panic("could not decode end", zap.Error(err))
}
includeSet := [][]byte{}
@ -398,7 +398,7 @@ func NewTokenExecutionEngine(
}
if len(e.hypergraph.GetVertexAdds()) == 0 {
panic("hypergraph does not contain id set for application")
e.logger.Panic("hypergraph does not contain id set for application")
}
var vertices *hypergraph.IdSet
@ -407,7 +407,7 @@ func NewTokenExecutionEngine(
}
if vertices == nil {
panic("hypergraph does not contain id set for application")
e.logger.Panic("hypergraph does not contain id set for application")
}
rebuildSet := [][]byte{}
@ -517,10 +517,10 @@ func NewTokenExecutionEngine(
FrameNumber: f.FrameNumber,
}
if err := resume.SignED448(e.pubSub.GetPublicKey(), e.pubSub.SignMessage); err != nil {
panic(err)
e.logger.Panic("error signing resume message", zap.Error(err))
}
if err := resume.Validate(); err != nil {
panic(err)
e.logger.Panic("error validating resume message", zap.Error(err))
}
// need to wait for peering
@ -641,7 +641,7 @@ func (e *TokenExecutionEngine) addBatchToHypergraph(batchKey [][]byte, batchValu
batchTrees := make([]*qcrypto.VectorCommitmentTree, len(batchKey))
txn, err := e.hypergraphStore.NewTransaction(false)
if err != nil {
panic(err)
e.logger.Panic("could not create transaction", zap.Error(err))
}
for i, chunk := range batchValue {
@ -711,7 +711,7 @@ func (e *TokenExecutionEngine) addBatchToHypergraph(batchKey [][]byte, batchValu
err = e.hypergraphStore.SaveVertexTree(txn, id, vertTree)
if err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not save vertex tree", zap.Error(err))
}
}
@ -720,13 +720,13 @@ func (e *TokenExecutionEngine) addBatchToHypergraph(batchKey [][]byte, batchValu
txn,
batchCompressed[i],
); err != nil {
panic(err)
e.logger.Panic("could not add vertex", zap.Error(err))
}
}
if err := txn.Commit(); err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not commit transaction", zap.Error(err))
}
}
@ -889,7 +889,7 @@ func (e *TokenExecutionEngine) rebuildMissingSetForHypergraph(set [][]byte) {
frameNumber, coin, err := e.coinStore.GetCoinByAddress(nil, address)
if err != nil {
panic(err)
e.logger.Panic("could not get coin by address", zap.Error(err))
}
value := []byte{}
@ -920,7 +920,7 @@ func (e *TokenExecutionEngine) rebuildMissingSetForHypergraph(set [][]byte) {
txn, err := e.clockStore.NewTransaction(false)
if err != nil {
panic(err)
e.logger.Panic("could not create transaction", zap.Error(err))
}
e.logger.Info("committing hypergraph")
@ -934,7 +934,7 @@ func (e *TokenExecutionEngine) rebuildMissingSetForHypergraph(set [][]byte) {
if err = txn.Commit(); err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not commit transaction", zap.Error(err))
}
e.hypergraphStore.MarkHypergraphAsComplete()
}
@ -950,18 +950,18 @@ func (e *TokenExecutionEngine) rebuildHypergraph(totalRange int) {
}
start, err := hex.DecodeString(e.engineConfig.RebuildStart)
if err != nil {
panic(err)
e.logger.Panic("could not decode start", zap.Error(err))
}
end, err := hex.DecodeString(e.engineConfig.RebuildEnd)
if err != nil {
panic(err)
e.logger.Panic("could not decode end", zap.Error(err))
}
iter, err := e.coinStore.RangeCoins(
start,
end,
)
if err != nil {
panic(err)
e.logger.Panic("could not get range coins", zap.Error(err))
}
var batchKey, batchValue [][]byte
processed := 0
@ -974,7 +974,7 @@ func (e *TokenExecutionEngine) rebuildHypergraph(totalRange int) {
coin := &protobufs.Coin{}
err := proto.Unmarshal(iter.Value()[8:], coin)
if err != nil {
panic(err)
e.logger.Panic("could not unmarshal coin", zap.Error(err))
}
value := []byte{}
@ -1006,7 +1006,7 @@ func (e *TokenExecutionEngine) rebuildHypergraph(totalRange int) {
txn, err := e.clockStore.NewTransaction(false)
if err != nil {
panic(err)
e.logger.Panic("could not create transaction", zap.Error(err))
}
e.logger.Info("committing hypergraph")
@ -1020,7 +1020,7 @@ func (e *TokenExecutionEngine) rebuildHypergraph(totalRange int) {
if err = txn.Commit(); err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not commit transaction", zap.Error(err))
}
e.hypergraphStore.MarkHypergraphAsComplete()
}
@ -1049,12 +1049,12 @@ func (e *TokenExecutionEngine) Start() <-chan error {
go func() {
err := <-e.clock.Start()
if err != nil {
panic(err)
e.logger.Panic("could not start clock", zap.Error(err))
}
err = <-e.clock.RegisterExecutor(e, 0)
if err != nil {
panic(err)
e.logger.Panic("could not register executor", zap.Error(err))
}
errChan <- nil
@ -1202,7 +1202,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
hg, err := e.hypergraphStore.LoadHypergraph()
if err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not load hypergraph", zap.Error(err))
}
for i, output := range app.TokenOutputs.Outputs {
@ -1253,7 +1253,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
)
if err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not commit and save vertex data", zap.Error(err))
}
if err := hg.AddVertex(
@ -1266,7 +1266,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
),
); err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not add vertex", zap.Error(err))
}
case *protobufs.TokenOutput_DeletedCoin:
_, coin, err := e.coinStore.GetCoinByAddress(nil, o.DeletedCoin.Address)
@ -1319,7 +1319,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
)
if err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not commit and save vertex data", zap.Error(err))
}
}
@ -1333,7 +1333,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
),
); err != nil {
txn.Abort()
panic(err)
e.logger.Panic("could not remove vertex", zap.Error(err))
}
case *protobufs.TokenOutput_Proof:
address, err := outputAddresses[i], outputAddressErrors[i]
@ -2157,23 +2157,23 @@ func (e *TokenExecutionEngine) AnnounceProverMerge() *protobufs.AnnounceProverRe
for _, conf := range e.engineConfig.MultisigProverEnrollmentPaths {
extraConf, err := config.LoadConfig(conf, "", false)
if err != nil {
panic(err)
e.logger.Panic("error loading config", zap.Error(err))
}
peerPrivKey, err := hex.DecodeString(extraConf.P2P.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
e.logger.Panic("error loading config", zap.Error(err))
}
privKey, err := pcrypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
e.logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
pub := privKey.GetPublic()
pubBytes, err := pub.Raw()
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
e.logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
helpers = append(helpers, protobufs.ED448SignHelper{
@ -2185,10 +2185,10 @@ func (e *TokenExecutionEngine) AnnounceProverMerge() *protobufs.AnnounceProverRe
announce := &protobufs.AnnounceProverRequest{}
if err := announce.SignED448(helpers); err != nil {
panic(err)
e.logger.Panic("error signing announce", zap.Error(err))
}
if err := announce.Validate(); err != nil {
panic(err)
e.logger.Panic("error validating announce", zap.Error(err))
}
return announce
@ -2207,10 +2207,10 @@ func (e *TokenExecutionEngine) AnnounceProverJoin() {
Announce: e.AnnounceProverMerge(),
}
if err := join.SignED448(e.pubSub.GetPublicKey(), e.pubSub.SignMessage); err != nil {
panic(err)
e.logger.Panic("error signing join", zap.Error(err))
}
if err := join.Validate(); err != nil {
panic(err)
e.logger.Panic("error validating join", zap.Error(err))
}
if err := e.publishMessage(

View File

@ -515,14 +515,15 @@ func CreateGenesisState(
[][]byte,
map[string]uint64,
) {
logger = logger.With(zap.String("stage", "create-genesis-state"))
genesis := config.GetGenesis()
if genesis == nil {
panic("genesis is nil")
logger.Panic("genesis is nil")
}
seed, err := hex.DecodeString(engineConfig.GenesisSeed)
if err != nil {
panic(err)
logger.Panic("failed to decode genesis seed", zap.Error(err))
}
logger.Info("creating genesis frame from message:")
@ -558,32 +559,32 @@ func CreateGenesisState(
err = json.Unmarshal(bridgedPeersJsonBinary, &bridged)
if err != nil {
panic(err)
logger.Panic("failed to unmarshal bridged peers json", zap.Error(err))
}
err = json.Unmarshal(ceremonyVouchersJsonBinary, &vouchers)
if err != nil {
panic(err)
logger.Panic("failed to unmarshal ceremony vouchers json", zap.Error(err))
}
err = json.Unmarshal(firstRetroJsonBinary, &firstRetro)
if err != nil {
panic(err)
logger.Panic("failed to unmarshal first retro json", zap.Error(err))
}
err = json.Unmarshal(secondRetroJsonBinary, &secondRetro)
if err != nil {
panic(err)
logger.Panic("failed to unmarshal second retro json", zap.Error(err))
}
err = json.Unmarshal(thirdRetroJsonBinary, &thirdRetro)
if err != nil {
panic(err)
logger.Panic("failed to unmarshal third retro json", zap.Error(err))
}
err = json.Unmarshal(fourthRetroJsonBinary, &fourthRetro)
if err != nil {
panic(err)
logger.Panic("failed to unmarshal fourth retro json", zap.Error(err))
}
bridgedAddrs := map[string]struct{}{}
@ -593,7 +594,7 @@ func CreateGenesisState(
for _, b := range bridged {
amt, err := decimal.NewFromString(b.Amount)
if err != nil {
panic(err)
logger.Panic("failed to parse bridged amount", zap.Error(err))
}
bridgeTotal = bridgeTotal.Add(amt)
bridgedAddrs[b.Identifier] = struct{}{}
@ -610,7 +611,7 @@ func CreateGenesisState(
if _, ok := bridgedAddrs[f.PeerId]; !ok {
peerIdTotals[f.PeerId], err = decimal.NewFromString(f.Reward)
if err != nil {
panic(err)
logger.Panic("failed to parse first retro amount", zap.Error(err))
}
}
@ -618,7 +619,7 @@ func CreateGenesisState(
max := 157208
actual, err := strconv.Atoi(f.Reward)
if err != nil {
panic(err)
logger.Panic("failed to parse first retro amount", zap.Error(err))
}
peerSeniority[addrBytes] = uint64(10 * 6 * 60 * 24 * 92 / (max / actual))
@ -642,7 +643,7 @@ func CreateGenesisState(
amount, err := decimal.NewFromString(f.Reward)
if err != nil {
panic(err)
logger.Panic("failed to parse second retro amount", zap.Error(err))
}
if !ok {
@ -687,7 +688,7 @@ func CreateGenesisState(
amount, err := decimal.NewFromString(f.Reward)
if err != nil {
panic(err)
logger.Panic("failed to parse third retro amount", zap.Error(err))
}
if !ok {
@ -713,7 +714,7 @@ func CreateGenesisState(
amount, err := decimal.NewFromString(f.Reward)
if err != nil {
panic(err)
logger.Panic("failed to parse fourth retro amount", zap.Error(err))
}
if !ok {
@ -736,7 +737,7 @@ func CreateGenesisState(
factor, _ := decimal.NewFromString("8000000000")
bridgeAddressHex, err := hex.DecodeString(BridgeAddress)
if err != nil {
panic(err)
logger.Panic("failed to decode bridge address", zap.Error(err))
}
totalExecutions := 0
@ -778,12 +779,12 @@ func CreateGenesisState(
}
peerBytes, err := base58.Decode(peerId)
if err != nil {
panic(err)
logger.Panic("failed to decode peer id", zap.Error(err))
}
addr, err := poseidon.HashBytes(peerBytes)
if err != nil {
panic(err)
logger.Panic("failed to hash peer id", zap.Error(err))
}
genesisState.Outputs = append(genesisState.Outputs, &protobufs.TokenOutput{
@ -818,12 +819,12 @@ func CreateGenesisState(
}
keyBytes, err := hex.DecodeString(voucher[2:])
if err != nil {
panic(err)
logger.Panic("failed to decode voucher", zap.Error(err))
}
addr, err := poseidon.HashBytes(keyBytes)
if err != nil {
panic(err)
logger.Panic("failed to hash voucher", zap.Error(err))
}
genesisState.Outputs = append(genesisState.Outputs, &protobufs.TokenOutput{
@ -856,12 +857,12 @@ func CreateGenesisState(
txn, err := coinStore.NewTransaction(false)
for _, output := range genesisState.Outputs {
if err != nil {
panic(err)
logger.Panic("failed to create transaction", zap.Error(err))
}
address, err := GetAddressOfCoin(output.GetCoin(), 0, 0)
if err != nil {
panic(err)
logger.Panic("failed to get address of coin", zap.Error(err))
}
err = coinStore.PutCoin(
txn,
@ -870,7 +871,7 @@ func CreateGenesisState(
output.GetCoin(),
)
if err != nil {
panic(err)
logger.Panic("failed to put coin", zap.Error(err))
}
value := []byte{}
@ -901,7 +902,7 @@ func CreateGenesisState(
)
if err != nil {
txn.Abort()
panic(err)
logger.Panic("failed to commit and save vertex data", zap.Error(err))
}
if err := hg.AddVertex(
@ -914,16 +915,16 @@ func CreateGenesisState(
),
); err != nil {
txn.Abort()
panic(err)
logger.Panic("failed to add vertex", zap.Error(err))
}
}
if err := txn.Commit(); err != nil {
panic(err)
logger.Panic("failed to commit transaction", zap.Error(err))
}
txn, err = clockStore.NewTransaction(false)
if err != nil {
panic(err)
logger.Panic("failed to create transaction", zap.Error(err))
}
err = clockStore.PutPeerSeniorityMap(
@ -932,20 +933,20 @@ func CreateGenesisState(
map[string]uint64{},
)
if err != nil {
panic(err)
logger.Panic("failed to put peer seniority map", zap.Error(err))
}
intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
if err = txn.Commit(); err != nil {
panic(err)
logger.Panic("failed to commit transaction", zap.Error(err))
}
logger.Info("encoded transcript")
outputBytes, err := proto.Marshal(genesisState)
if err != nil {
panic(err)
logger.Panic("failed to marshal genesis state", zap.Error(err))
}
executionOutput := &protobufs.IntrinsicExecutionOutput{
@ -956,20 +957,20 @@ func CreateGenesisState(
data, err := proto.Marshal(executionOutput)
if err != nil {
panic(err)
logger.Panic("failed to marshal execution output", zap.Error(err))
}
logger.Debug("encoded execution output")
digest := sha3.NewShake256()
_, err = digest.Write(data)
if err != nil {
panic(err)
logger.Panic("failed to write digest", zap.Error(err))
}
expand := make([]byte, 1024)
_, err = digest.Read(expand)
if err != nil {
panic(err)
logger.Panic("failed to read digest", zap.Error(err))
}
commitment, err := inclusionProver.CommitRaw(
@ -977,7 +978,7 @@ func CreateGenesisState(
16,
)
if err != nil {
panic(err)
logger.Panic("failed to commit raw", zap.Error(err))
}
logger.Debug("creating kzg proof")
@ -987,7 +988,7 @@ func CreateGenesisState(
16,
)
if err != nil {
panic(err)
logger.Panic("failed to prove raw", zap.Error(err))
}
logger.Info("finalizing execution proof")
@ -1014,7 +1015,7 @@ func CreateGenesisState(
addr, err := poseidon.HashBytes(genesis.Beacon)
if err != nil {
panic(err)
logger.Panic("failed to hash beacon", zap.Error(err))
}
factor, _ := new(big.Int).SetString("8000000000", 10)
@ -1041,12 +1042,12 @@ func CreateGenesisState(
txn, err := coinStore.NewTransaction(false)
for _, output := range genesisState.Outputs {
if err != nil {
panic(err)
logger.Panic("failed to create transaction", zap.Error(err))
}
address, err := GetAddressOfCoin(output.GetCoin(), 0, 0)
if err != nil {
panic(err)
logger.Panic("failed to get address of coin", zap.Error(err))
}
err = coinStore.PutCoin(
txn,
@ -1055,11 +1056,11 @@ func CreateGenesisState(
output.GetCoin(),
)
if err != nil {
panic(err)
logger.Panic("failed to put coin", zap.Error(err))
}
coinBytes, err := proto.Marshal(output.GetCoin())
if err != nil {
panic(err)
logger.Panic("failed to marshal coin", zap.Error(err))
}
data := []byte{}
@ -1080,7 +1081,7 @@ func CreateGenesisState(
)
if err != nil {
txn.Abort()
panic(err)
logger.Panic("failed to commit and save vertex data", zap.Error(err))
}
if err := hg.AddVertex(
@ -1093,20 +1094,20 @@ func CreateGenesisState(
),
); err != nil {
txn.Abort()
panic(err)
logger.Panic("failed to add vertex", zap.Error(err))
}
}
intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
if err := txn.Commit(); err != nil {
panic(err)
logger.Panic("failed to commit transaction", zap.Error(err))
}
logger.Info("encoded transcript")
outputBytes, err := proto.Marshal(genesisState)
if err != nil {
panic(err)
logger.Panic("failed to marshal genesis state", zap.Error(err))
}
executionOutput := &protobufs.IntrinsicExecutionOutput{
@ -1117,20 +1118,20 @@ func CreateGenesisState(
data, err := proto.Marshal(executionOutput)
if err != nil {
panic(err)
logger.Panic("failed to marshal execution output", zap.Error(err))
}
logger.Debug("encoded execution output")
digest := sha3.NewShake256()
_, err = digest.Write(data)
if err != nil {
panic(err)
logger.Panic("failed to write digest", zap.Error(err))
}
expand := make([]byte, 1024)
_, err = digest.Read(expand)
if err != nil {
panic(err)
logger.Panic("failed to read digest", zap.Error(err))
}
commitment, err := inclusionProver.CommitRaw(
@ -1138,7 +1139,7 @@ func CreateGenesisState(
16,
)
if err != nil {
panic(err)
logger.Panic("failed to commit raw", zap.Error(err))
}
logger.Debug("creating kzg proof")
@ -1148,7 +1149,7 @@ func CreateGenesisState(
16,
)
if err != nil {
panic(err)
logger.Panic("failed to prove raw", zap.Error(err))
}
logger.Info("finalizing execution proof")

View File

@ -1,6 +1,10 @@
package runtime
import "runtime"
import (
"runtime"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
const minimumCores = 3
@ -8,13 +12,14 @@ const minimumCores = 3
// It will use GOMAXPROCS as a base, and then subtract a number of CPUs
// which are meant to be left for other tasks, such as networking.
func WorkerCount(requested int, validate bool) int {
logger := utils.GetLogger()
n := runtime.GOMAXPROCS(0)
if validate {
if n < minimumCores {
panic("invalid system configuration, must have at least 3 cores")
logger.Panic("invalid system configuration, must have at least 3 cores")
}
if requested > 0 && requested < minimumCores {
panic("invalid worker count, must have at least 3 workers")
logger.Panic("invalid worker count, must have at least 3 workers")
}
}
if requested > 0 {

View File

@ -33,12 +33,12 @@ func NewFileKeyManager(
logger *zap.Logger,
) *FileKeyManager {
if keyStoreConfig.KeyStoreFile == nil {
panic("key store config missing")
logger.Panic("key store config missing")
}
key, err := hex.DecodeString(keyStoreConfig.KeyStoreFile.EncryptionKey)
if err != nil {
panic(err)
logger.Panic("could not decode encryption key", zap.Error(err))
}
store := make(map[string]Key)
@ -55,7 +55,7 @@ func NewFileKeyManager(
os.FileMode(0600),
)
if err != nil {
panic(err)
logger.Panic("could not open store", zap.Error(err))
}
defer file.Close()
@ -63,7 +63,7 @@ func NewFileKeyManager(
d := yaml.NewDecoder(file)
if err := d.Decode(store); err != nil {
panic(err)
logger.Panic("could not decode store", zap.Error(err))
}
return &FileKeyManager{

View File

@ -10,7 +10,6 @@ import (
"flag"
"fmt"
"io/fs"
"log"
"math/big"
"net/http"
npprof "net/http/pprof"
@ -161,7 +160,9 @@ func signatureCheckDefault() bool {
if err == nil {
return def
} else {
fmt.Println("Invalid environment variable QUILIBRIUM_SIGNATURE_CHECK, must be 'true' or 'false'. Got: " + envVarValue)
utils.GetLogger().Error(
"Invalid environment variable QUILIBRIUM_SIGNATURE_CHECK, must be 'true' or 'false'.",
zap.String("envVarValue", envVarValue))
}
}
@ -170,47 +171,43 @@ func signatureCheckDefault() bool {
func main() {
flag.Parse()
logger := utils.GetLogger()
if *signatureCheck {
sLogger := logger.With(zap.String("stage", "signature-check"))
if runtime.GOOS == "windows" {
fmt.Println("Signature check not available for windows yet, skipping...")
sLogger.Info("Signature check not available for windows yet, skipping...")
} else {
ex, err := os.Executable()
if err != nil {
panic(err)
sLogger.Panic("Failed to get executable path", zap.Error(err), zap.String("executable", ex))
}
b, err := os.ReadFile(ex)
if err != nil {
fmt.Println(
"Error encountered during signature check are you running this " +
"from source? (use --signature-check=false)",
)
panic(err)
sLogger.Panic("Error encountered during signature check are you running this "+
"from source? (use --signature-check=false)",
zap.Error(err))
}
checksum := sha3.Sum256(b)
digest, err := os.ReadFile(ex + ".dgst")
if err != nil {
fmt.Println("Digest file not found")
os.Exit(1)
sLogger.Fatal("Digest file not found", zap.Error(err))
}
parts := strings.Split(string(digest), " ")
if len(parts) != 2 {
fmt.Println("Invalid digest file format")
os.Exit(1)
sLogger.Fatal("Invalid digest file format")
}
digestBytes, err := hex.DecodeString(parts[1][:64])
if err != nil {
fmt.Println("Invalid digest file format")
os.Exit(1)
sLogger.Fatal("Invalid digest file format", zap.Error(err))
}
if !bytes.Equal(checksum[:], digestBytes) {
fmt.Println("Invalid digest for node")
os.Exit(1)
sLogger.Fatal("Invalid digest for node")
}
count := 0
@ -224,21 +221,19 @@ func main() {
pubkey, _ := hex.DecodeString(config.Signatories[i-1])
if !ed448.Verify(pubkey, digest, sig, "") {
fmt.Printf("Failed signature check for signatory #%d\n", i)
os.Exit(1)
sLogger.Fatal("Failed signature check for signatory", zap.Int("signatory", i))
}
count++
}
if count < ((len(config.Signatories)-4)/2)+((len(config.Signatories)-4)%2) {
fmt.Printf("Quorum on signatures not met")
os.Exit(1)
sLogger.Fatal("Quorum on signatures not met")
}
fmt.Println("Signature check passed")
sLogger.Info("Signature check passed")
}
} else {
fmt.Println("Signature check disabled, skipping...")
logger.Info("Signature check disabled, skipping...")
}
if *memprofile != "" && *core == 0 {
@ -247,7 +242,7 @@ func main() {
time.Sleep(5 * time.Minute)
f, err := os.Create(*memprofile)
if err != nil {
log.Fatal(err)
logger.Fatal("Failed to create memory profile file", zap.Error(err))
}
pprof.WriteHeapProfile(f)
f.Close()
@ -258,7 +253,7 @@ func main() {
if *cpuprofile != "" && *core == 0 {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
logger.Fatal("Failed to create CPU profile file", zap.Error(err))
}
defer f.Close()
pprof.StartCPUProfile(f)
@ -273,7 +268,7 @@ func main() {
mux.HandleFunc("/debug/pprof/profile", npprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", npprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", npprof.Trace)
log.Fatal(http.ListenAndServe(*pprofServer, mux))
logger.Fatal("Failed to start pprof server", zap.Error(http.ListenAndServe(*pprofServer, mux)))
}()
}
@ -281,14 +276,14 @@ func main() {
go func() {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(*prometheusServer, mux))
logger.Fatal("Failed to start prometheus server", zap.Error(http.ListenAndServe(*prometheusServer, mux)))
}()
}
if *balance {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
logger.Fatal("Failed to load config", zap.Error(err))
}
printBalance(config)
@ -299,7 +294,7 @@ func main() {
if *peerId {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
logger.Fatal("Failed to load config", zap.Error(err))
}
printPeerID(config.P2P)
@ -309,18 +304,18 @@ func main() {
if *importPrivKey != "" {
config, err := config.LoadConfig(*configDirectory, *importPrivKey, false)
if err != nil {
panic(err)
logger.Fatal("Failed to load config", zap.Error(err))
}
printPeerID(config.P2P)
fmt.Println("Import completed, you are ready for the launch.")
logger.Info("Import completed, you are ready for the launch.")
return
}
if *nodeInfo {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
logger.Fatal("Failed to load config", zap.Error(err))
}
printNodeInfo(config)
@ -335,27 +330,26 @@ func main() {
nodeConfig, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
panic(err)
logger.Fatal("Failed to load config", zap.Error(err))
}
if *compactDB && *core == 0 {
db := store.NewPebbleDB(nodeConfig.DB)
if err := db.CompactAll(); err != nil {
panic(err)
logger.Fatal("Failed to compact database", zap.Error(err))
}
if err := db.Close(); err != nil {
panic(err)
logger.Fatal("Failed to close database", zap.Error(err))
}
return
}
if *network != 0 {
if nodeConfig.P2P.BootstrapPeers[0] == config.BootstrapPeers[0] {
fmt.Println(
logger.Fatal(
"Node has specified to run outside of mainnet but is still " +
"using default bootstrap list. This will fail. Exiting.",
)
os.Exit(1)
}
nodeConfig.Engine.GenesisSeed = fmt.Sprintf(
@ -364,7 +358,7 @@ func main() {
nodeConfig.Engine.GenesisSeed,
)
nodeConfig.P2P.Network = uint8(*network)
fmt.Println(
logger.Warn(
"Node is operating outside of mainnet be sure you intended to do this.",
)
}
@ -379,7 +373,7 @@ func main() {
if *dbConsole {
console, err := app.NewDBConsole(nodeConfig)
if err != nil {
panic(err)
logger.Panic("Failed to start database console", zap.Error(err))
}
console.Run()
@ -391,7 +385,7 @@ func main() {
signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
dht, err := app.NewDHTNode(nodeConfig)
if err != nil {
panic(err)
logger.Error("Failed to start DHT node", zap.Error(err))
}
go func() {
@ -406,8 +400,7 @@ func main() {
if len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
maxProcs, numCPU := runtime.GOMAXPROCS(0), runtime.NumCPU()
if maxProcs > numCPU && !nodeConfig.Engine.AllowExcessiveGOMAXPROCS {
fmt.Println("GOMAXPROCS is set higher than the number of available CPUs.")
os.Exit(1)
logger.Fatal("GOMAXPROCS is set higher than the number of available CPUs.")
}
nodeConfig.Engine.DataWorkerCount = qruntime.WorkerCount(
@ -419,12 +412,7 @@ func main() {
rdebug.SetMemoryLimit(nodeConfig.Engine.DataWorkerMemoryLimit)
if *parentProcess == 0 && len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
panic("parent process pid not specified")
}
l, err := zap.NewProduction()
if err != nil {
panic(err)
logger.Fatal("parent process pid not specified")
}
rpcMultiaddr := fmt.Sprintf(
@ -438,19 +426,19 @@ func main() {
srv, err := rpc.NewDataWorkerIPCServer(
rpcMultiaddr,
l,
logger,
uint32(*core)-1,
qcrypto.NewWesolowskiFrameProver(l),
qcrypto.NewWesolowskiFrameProver(logger),
nodeConfig,
*parentProcess,
)
if err != nil {
panic(err)
logger.Panic("Failed to start data worker server", zap.Error(err))
}
err = srv.Start()
if err != nil {
panic(err)
logger.Panic("Failed to start data worker server", zap.Error(err))
}
return
} else {
@ -461,11 +449,15 @@ func main() {
}
switch availableOverhead := totalMemory - dataWorkerReservedMemory; {
case totalMemory < dataWorkerReservedMemory:
fmt.Println("The memory allocated to data workers exceeds the total system memory.")
fmt.Println("You are at risk of running out of memory during runtime.")
logger.Warn("The memory allocated to data workers exceeds the total system memory.",
zap.Int64("totalMemory", totalMemory),
zap.Int64("dataWorkerReservedMemory", dataWorkerReservedMemory),
)
logger.Warn("You are at risk of running out of memory during runtime.")
case availableOverhead < 8*1024*1024*1024:
fmt.Println("The memory available to the node, unallocated to the data workers, is less than 8GiB.")
fmt.Println("You are at risk of running out of memory during runtime.")
logger.Warn("The memory available to the node, unallocated to the data workers, is less than 8GiB.",
zap.Int64("availableOverhead", availableOverhead))
logger.Warn("You are at risk of running out of memory during runtime.")
default:
if _, explicitGOMEMLIMIT := os.LookupEnv("GOMEMLIMIT"); !explicitGOMEMLIMIT {
rdebug.SetMemoryLimit(availableOverhead * 8 / 10)
@ -476,7 +468,7 @@ func main() {
}
}
fmt.Println("Loading ceremony state and starting node...")
logger.Info("Loading ceremony state and starting node...")
if !*integrityCheck {
go spawnDataWorkers(nodeConfig)
@ -506,7 +498,7 @@ func main() {
if *debug && *strictSyncServer == "" && *strictSyncClient == "" {
node, err = app.NewDebugNode(nodeConfig, report)
} else if *strictSyncServer != "" {
fmt.Println("Running in strict sync server mode, will not connect to regular p2p network...")
logger.Info("Running in strict sync server mode, will not connect to regular p2p network...")
node, err = app.NewStrictSyncNode(
nodeConfig,
@ -517,7 +509,7 @@ func main() {
),
)
} else if *strictSyncClient != "" {
fmt.Println("Running in strict sync client mode, will not connect to regular p2p network...")
logger.Info("Running in strict sync client mode, will not connect to regular p2p network...")
node, err = app.NewStrictSyncNode(
nodeConfig,
@ -529,13 +521,13 @@ func main() {
}
if err != nil {
panic(err)
logger.Panic("Failed to start node", zap.Error(err))
}
if *integrityCheck {
fmt.Println("Running integrity check...")
logger.Info("Running integrity check...")
node.VerifyProofIntegrity()
fmt.Println("Integrity check passed!")
logger.Info("Integrity check passed!")
return
}
@ -559,10 +551,10 @@ func main() {
node.GetExecutionEngines(),
)
if err != nil {
panic(err)
logger.Panic("Failed to new RPC server", zap.Error(err))
}
if err := srv.Start(); err != nil {
panic(err)
logger.Panic("Failed to start RPC server", zap.Error(err))
}
defer srv.Stop()
}
@ -573,20 +565,19 @@ func main() {
var dataWorkers []*exec.Cmd
func spawnDataWorkers(nodeConfig *config.Config) {
logger := utils.GetLogger().With(zap.String("stage", "spawn-data-worker"))
if len(nodeConfig.Engine.DataWorkerMultiaddrs) != 0 {
fmt.Println(
"Data workers configured by multiaddr, be sure these are running...",
)
logger.Warn("Data workers configured by multiaddr, be sure these are running...")
return
}
process, err := os.Executable()
if err != nil {
panic(err)
logger.Panic("Failed to get executable path", zap.Error(err))
}
dataWorkers = make([]*exec.Cmd, nodeConfig.Engine.DataWorkerCount)
fmt.Printf("Spawning %d data workers...\n", nodeConfig.Engine.DataWorkerCount)
logger.Info("Spawning data workers", zap.Int("count", nodeConfig.Engine.DataWorkerCount))
for i := 1; i <= nodeConfig.Engine.DataWorkerCount; i++ {
i := i
@ -602,13 +593,15 @@ func spawnDataWorkers(nodeConfig *config.Config) {
cmd.Stderr = os.Stdout
err := cmd.Start()
if err != nil {
panic(err)
logger.Panic("Failed to start data worker",
zap.String("cmd", cmd.String()),
zap.Error(err))
}
dataWorkers[i-1] = cmd
cmd.Wait()
time.Sleep(25 * time.Millisecond)
fmt.Printf("Data worker %d stopped, restarting...\n", i)
logger.Info("Data worker stopped, restarting...", zap.Int("worker-number", i))
}
}()
}
@ -618,9 +611,9 @@ func stopDataWorkers() {
for i := 0; i < len(dataWorkers); i++ {
err := dataWorkers[i].Process.Signal(os.Kill)
if err != nil {
fmt.Printf(
"fatal: unable to kill worker with pid %d, please kill this process!\n",
dataWorkers[i].Process.Pid,
utils.GetLogger().Info("unable to kill worker",
zap.Int("pid", dataWorkers[i].Process.Pid),
zap.Error(err),
)
}
}
@ -630,7 +623,7 @@ func RunSelfTestIfNeeded(
configDir string,
nodeConfig *config.Config,
) *protobufs.SelfTestReport {
logger, _ := zap.NewProduction()
logger := utils.GetLogger()
cores := runtime.GOMAXPROCS(0)
if len(nodeConfig.Engine.DataWorkerMultiaddrs) != 0 {
@ -642,7 +635,7 @@ func RunSelfTestIfNeeded(
if d == nil {
err := os.Mkdir(filepath.Join(configDir, "store"), 0755)
if err != nil {
panic(err)
logger.Panic("Failed to create store directory", zap.Error(err))
}
}
@ -661,7 +654,7 @@ func RunSelfTestIfNeeded(
}
reportBytes, err := proto.Marshal(report)
if err != nil {
panic(err)
logger.Panic("Failed to marshal self test report", zap.Error(err))
}
err = os.WriteFile(
@ -670,19 +663,20 @@ func RunSelfTestIfNeeded(
fs.FileMode(0600),
)
if err != nil {
panic(err)
logger.Panic("Failed to write self test report", zap.Error(err))
}
return report
}
func clearIfTestData(configDir string, nodeConfig *config.Config) {
logger := utils.GetLogger().With(zap.String("stage", "clear-test-data"))
_, err := os.Stat(filepath.Join(configDir, "RELEASE_VERSION"))
if os.IsNotExist(err) {
fmt.Println("Clearing test data...")
logger.Info("Clearing test data...")
err := os.RemoveAll(nodeConfig.DB.Path)
if err != nil {
panic(err)
logger.Panic("Failed to remove test data", zap.Error(err))
}
versionFile, err := os.OpenFile(
@ -691,30 +685,30 @@ func clearIfTestData(configDir string, nodeConfig *config.Config) {
fs.FileMode(0600),
)
if err != nil {
panic(err)
logger.Panic("Failed to open RELEASE_VERSION file", zap.Error(err))
}
_, err = versionFile.Write([]byte{0x01, 0x00, 0x00})
if err != nil {
panic(err)
logger.Panic("Failed to write RELEASE_VERSION file", zap.Error(err))
}
err = versionFile.Close()
if err != nil {
panic(err)
logger.Panic("Failed to close RELEASE_VERSION file", zap.Error(err))
}
}
}
func printBalance(config *config.Config) {
logger := utils.GetLogger()
if config.ListenGRPCMultiaddr == "" {
_, _ = fmt.Fprintf(os.Stderr, "gRPC Not Enabled, Please Configure\n")
os.Exit(1)
logger.Fatal("gRPC Not Enabled, Please Configure")
}
conn, err := app.ConnectToNode(config)
if err != nil {
panic(err)
logger.Panic("Connect to node failed", zap.Error(err))
}
defer conn.Close()
@ -722,7 +716,7 @@ func printBalance(config *config.Config) {
balance, err := app.FetchTokenBalance(client)
if err != nil {
panic(err)
logger.Panic("Failed to fetch token balance", zap.Error(err))
}
conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
@ -732,20 +726,23 @@ func printBalance(config *config.Config) {
}
func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
logger := utils.GetLogger()
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("Error to decode peer private key",
zap.Error(errors.Wrap(err, "error unmarshaling peerkey")))
}
privKey, err := crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("Error to unmarshal ed448 private key",
zap.Error(errors.Wrap(err, "error unmarshaling peerkey")))
}
pub := privKey.GetPublic()
id, err := peer.IDFromPublicKey(pub)
if err != nil {
panic(errors.Wrap(err, "error getting peer id"))
logger.Panic("Error to get peer id", zap.Error(err))
}
return id
@ -758,17 +755,16 @@ func printPeerID(p2pConfig *config.P2PConfig) {
}
func printNodeInfo(cfg *config.Config) {
logger := utils.GetLogger()
if cfg.ListenGRPCMultiaddr == "" {
_, _ = fmt.Fprintf(os.Stderr, "gRPC Not Enabled, Please Configure\n")
os.Exit(1)
logger.Fatal("gRPC Not Enabled, Please Configure")
}
printPeerID(cfg.P2P)
conn, err := app.ConnectToNode(cfg)
if err != nil {
fmt.Println("Could not connect to node. If it is still booting, please wait.")
os.Exit(1)
logger.Fatal("Could not connect to node. If it is still booting, please wait.", zap.Error(err))
}
defer conn.Close()
@ -776,7 +772,7 @@ func printNodeInfo(cfg *config.Config) {
nodeInfo, err := app.FetchNodeInfo(client)
if err != nil {
panic(err)
logger.Panic("Failed to fetch node info", zap.Error(err))
}
fmt.Println("Version: " + config.FormatVersion(nodeInfo.Version))

View File

@ -4,7 +4,9 @@ import (
"math/big"
"sort"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
func GetOnesIndices(input []byte) []int {
@ -47,17 +49,18 @@ func GetBloomFilter(data []byte, bitLength int, k int) []byte {
// GetBloomFilterIndices returns the indices of a bloom filter, in increasing
// order, assuming bitLength is a multiple of 32 as in GetBloomFilter.
func GetBloomFilterIndices(data []byte, bitLength int, k int) []byte {
logger := utils.GetLogger().With(zap.String("stage", "get-bloom-filter-indices"))
size := big.NewInt(int64(bitLength)).BitLen() - 1
h := sha3.NewShake256()
_, err := h.Write(data)
if err != nil {
panic(err)
logger.Panic("could not write to hash", zap.Error(err))
}
digest := make([]byte, size*k)
_, err = h.Read(digest)
if err != nil {
panic(err)
logger.Panic("could not read from hash", zap.Error(err))
}
indices := []string{}

View File

@ -47,6 +47,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/internal/observability"
"source.quilibrium.com/quilibrium/monorepo/node/p2p/internal"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
const (
@ -88,20 +89,21 @@ var BITMASK_ALL = []byte{
var ANNOUNCE_PREFIX = "quilibrium-2.0.2-dusk-"
func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
logger := utils.GetLogger().With(zap.String("stage", "get-peer-id"))
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
privKey, err := crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
pub := privKey.GetPublic()
id, err := peer.IDFromPublicKey(pub)
if err != nil {
panic(errors.Wrap(err, "error getting peer id"))
logger.Panic("error getting peer id", zap.Error(err))
}
return id
@ -121,7 +123,7 @@ func NewBlossomSubStreamer(
peerinfo, err := peer.AddrInfoFromString("/ip4/185.209.178.191/udp/8336/quic-v1/p2p/QmcKQjpQmLpbDsiif2MuakhHFyxWvqYauPsJDaXnLav7PJ")
if err != nil {
panic(err)
logger.Panic("error getting peer info", zap.Error(err))
}
bootstrappers = append(bootstrappers, *peerinfo)
@ -130,12 +132,12 @@ func NewBlossomSubStreamer(
if p2pConfig.PeerPrivKey != "" {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
privKey, err = crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
opts = append(opts, libp2p.Identity(privKey))
@ -152,7 +154,7 @@ func NewBlossomSubStreamer(
h, err := libp2p.New(opts...)
if err != nil {
panic(errors.Wrap(err, "error constructing p2p"))
logger.Panic("error constructing p2p", zap.Error(err))
}
logger.Info("established peer id", zap.String("peer_id", h.ID().String()))
@ -193,7 +195,7 @@ func NewBlossomSub(
for _, peerAddr := range config.BootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
logger.Panic("error getting peer info", zap.Error(err))
}
if bytes.Equal([]byte(peerinfo.ID), []byte(peerId)) {
@ -205,7 +207,7 @@ func NewBlossomSub(
for _, peerAddr := range p2pConfig.BootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
logger.Panic("error getting peer info", zap.Error(err))
}
if bytes.Equal([]byte(peerinfo.ID), []byte(peerId)) {
@ -226,7 +228,7 @@ func NewBlossomSub(
for _, peerAddr := range defaultBootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
logger.Panic("error getting peer info", zap.Error(err))
}
bootstrappers = append(bootstrappers, *peerinfo)
@ -236,12 +238,12 @@ func NewBlossomSub(
if p2pConfig.PeerPrivKey != "" {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
privKey, err = crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
opts = append(opts, libp2p.Identity(privKey))
@ -256,7 +258,7 @@ func NewBlossomSub(
for _, peerAddr := range p2pConfig.DirectPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
logger.Panic("error getting peer info", zap.Error(err))
}
logger.Info("adding direct peer", zap.String("peer", peerinfo.ID.String()))
directPeers = append(directPeers, *peerinfo)
@ -272,7 +274,7 @@ func NewBlossomSub(
connmgr.WithEmergencyTrim(true),
)
if err != nil {
panic(err)
logger.Panic("error creating connection manager", zap.Error(err))
}
rm, err := resourceManager(
@ -280,7 +282,7 @@ func NewBlossomSub(
allowedPeers,
)
if err != nil {
panic(err)
logger.Panic("error creating resource manager", zap.Error(err))
}
opts = append(
@ -305,7 +307,7 @@ func NewBlossomSub(
h, err := libp2p.New(opts...)
if err != nil {
panic(errors.Wrap(err, "error constructing p2p"))
logger.Panic("error constructing p2p", zap.Error(err))
}
idService := internal.IDServiceFromHost(h)
@ -313,7 +315,7 @@ func NewBlossomSub(
reachabilitySub, err := h.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}, eventbus.Name("blossomsub"))
if err != nil {
panic(err)
logger.Panic("error subscribing to reachability events", zap.Error(err))
}
go func() {
defer reachabilitySub.Close()
@ -366,7 +368,7 @@ func NewBlossomSub(
internal.NewStaticPeerSource(bootstrappers, true),
)
if err := bootstrap.Connect(ctx); err != nil {
panic(err)
logger.Panic("error connecting to bootstrap peers", zap.Error(err))
}
bootstrap = internal.NewConditionalPeerConnector(
ctx,
@ -393,7 +395,7 @@ func NewBlossomSub(
),
)
if err := discovery.Connect(ctx); err != nil {
panic(err)
logger.Panic("error connecting to discovery peers", zap.Error(err))
}
discovery = internal.NewChainedPeerConnector(ctx, bootstrap, discovery)
bs.discovery = discovery
@ -418,7 +420,7 @@ func NewBlossomSub(
} else {
tracer, err = blossomsub.NewJSONTracer(p2pConfig.TraceLogFile)
if err != nil {
panic(errors.Wrap(err, "error building file tracer"))
logger.Panic("error building file tracer", zap.Error(err))
}
}
@ -484,7 +486,7 @@ func NewBlossomSub(
blossomOpts = append(blossomOpts, rt.WithDefaultTagTracer())
pubsub, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
if err != nil {
panic(err)
logger.Panic("error creating pubsub", zap.Error(err))
}
peerID := h.ID()
@ -794,10 +796,10 @@ func initDHT(
opts...,
)
if err != nil {
panic(err)
logger.Panic("error creating dht", zap.Error(err))
}
if err := kademliaDHT.Bootstrap(ctx); err != nil {
panic(err)
logger.Panic("error bootstrapping dht", zap.Error(err))
}
return kademliaDHT
}

View File

@ -97,19 +97,19 @@ func NewDataWorkerIPCServer(
) (*DataWorkerIPCServer, error) {
peerPrivKey, err := hex.DecodeString(config.P2P.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error decoding peerkey", zap.Error(err))
}
privKey, err := pcrypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
logger.Panic("error unmarshaling peerkey", zap.Error(err))
}
pub := privKey.GetPublic()
pubKey, err := pub.Raw()
if err != nil {
panic(err)
logger.Panic("error getting public key", zap.Error(err))
}
digest := make([]byte, 128)
@ -117,7 +117,7 @@ func NewDataWorkerIPCServer(
s.Write([]byte(pubKey))
_, err = s.Read(digest)
if err != nil {
panic(err)
logger.Panic("error reading digest", zap.Error(err))
}
indices := p2p.GetOnesIndices(p2p.GetBloomFilter(digest, 1024, 64))
@ -159,8 +159,7 @@ func (r *DataWorkerIPCServer) Start() error {
zap.String("address", r.listenAddrGRPC),
)
if err := s.Serve(mn.NetListener(lis)); err != nil {
r.logger.Error("terminating server", zap.Error(err))
panic(err)
r.logger.Panic("terminating server", zap.Error(err))
}
return nil

View File

@ -28,6 +28,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type Synchronizer interface {
@ -54,7 +55,7 @@ func NewStandaloneHypersyncServer(
) Synchronizer {
listenAddr, err := multiaddr.NewMultiaddr(strictSyncServer)
if err != nil {
panic(err)
utils.GetLogger().Panic("failed to parse listen address", zap.Error(err))
}
return &StandaloneHypersyncServer{
@ -71,7 +72,7 @@ func NewStandaloneHypersyncClient(
) Synchronizer {
serverAddr, err := multiaddr.NewMultiaddr(strictSyncClient)
if err != nil {
panic(err)
utils.GetLogger().Panic("failed to parse server address", zap.Error(err))
}
return &StandaloneHypersyncClient{
@ -87,7 +88,7 @@ func (s *StandaloneHypersyncServer) Start(
) {
lis, err := mn.Listen(s.listenAddr)
if err != nil {
panic(err)
logger.Panic("failed to listen", zap.Error(err))
}
s.grpcServer = grpc.NewServer(
@ -98,7 +99,7 @@ func (s *StandaloneHypersyncServer) Start(
hypergraphStore := store.NewPebbleHypergraphStore(s.dbConfig, db, logger)
hypergraph, err := hypergraphStore.LoadHypergraph()
if err != nil {
panic(err)
logger.Panic("failed to load hypergraph", zap.Error(err))
}
logger.Info("calculating existing hypergraph root commit")
@ -118,7 +119,7 @@ func (s *StandaloneHypersyncServer) Start(
[]byte{0xff},
)
if err != nil {
panic(err)
logger.Panic("failed to range coins", zap.Error(err))
}
for iter.First(); iter.Valid(); iter.Next() {
@ -165,7 +166,7 @@ func (s *StandaloneHypersyncClient) Start(
hypergraphStore := store.NewPebbleHypergraphStore(s.dbConfig, db, logger)
hypergraph, err := hypergraphStore.LoadHypergraph()
if err != nil {
panic(err)
logger.Panic("failed to load hypergraph", zap.Error(err))
}
logger.Info("calculating existing hypergraph root commit")
@ -185,7 +186,7 @@ func (s *StandaloneHypersyncClient) Start(
[]byte{0xff},
)
if err != nil {
panic(err)
logger.Panic("failed to range coins", zap.Error(err))
}
for iter.First(); iter.Valid(); iter.Next() {
@ -200,7 +201,7 @@ func (s *StandaloneHypersyncClient) Start(
_, addr, err := mn.DialArgs(s.serverAddr)
if err != nil {
panic(err)
logger.Panic("failed to parse server address", zap.Error(err))
}
credentials := insecure.NewCredentials()
@ -216,7 +217,7 @@ func (s *StandaloneHypersyncClient) Start(
),
)
if err != nil {
panic(err)
logger.Panic("failed to dial server", zap.Error(err))
}
client := protobufs.NewHypergraphComparisonServiceClient(cc)
@ -466,7 +467,7 @@ func getNodeAtPath(
slices.Concat(n.FullPrefix, []int{int(childIndex)}),
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
utils.GetLogger().Panic("failed to get node by path", zap.Error(err))
}
if child == nil {
@ -540,7 +541,7 @@ func getBranchInfoFromTree(
slices.Concat(branch.FullPrefix, []int{i}),
)
if err != nil && !strings.Contains(err.Error(), "item not found") {
panic(err)
utils.GetLogger().Panic("failed to get node by path", zap.Error(err))
}
}
if child != nil {

View File

@ -324,12 +324,12 @@ func (r *RPCServer) GetTokenInfo(
peerBytes := r.pubSub.GetPeerID()
peerAddr, err := poseidon.HashBytes(peerBytes)
if err != nil {
panic(err)
r.logger.Panic("failed to hash peer id", zap.Error(err))
}
addr, err := poseidon.HashBytes(provingKey.PublicKey)
if err != nil {
panic(err)
r.logger.Panic("failed to hash proving key", zap.Error(err))
}
addrBytes := addr.FillBytes(make([]byte, 32))
@ -337,12 +337,12 @@ func (r *RPCServer) GetTokenInfo(
_, _, coins, err := r.coinStore.GetCoinsForOwner(addrBytes)
if err != nil {
panic(err)
r.logger.Panic("failed to get coins for owner", zap.Error(err))
}
_, _, otherCoins, err := r.coinStore.GetCoinsForOwner(peerAddrBytes)
if err != nil {
panic(err)
r.logger.Panic("failed to get coins for owner", zap.Error(err))
}
total := big.NewInt(0)

View File

@ -1287,7 +1287,7 @@ func (p *PebbleClockStore) Compact(
selector, err := frame.GetSelector()
if err != nil {
panic(err)
p.logger.Panic("failed to get selector from frame", zap.Error(err))
}
parents = append(parents,
@ -1302,7 +1302,7 @@ func (p *PebbleClockStore) Compact(
selector, err := frame.GetSelector()
if err != nil {
panic(err)
p.logger.Panic("failed to get selector from frame", zap.Error(err))
}
err = p.db.Set(

View File

@ -70,7 +70,7 @@ func NewPebbleCoinStore(
) *PebbleCoinStore {
return &PebbleCoinStore{
db,
logger,
logger.With(zap.String("stage", "pebble-coin-store")),
}
}
@ -443,12 +443,12 @@ func (p *PebbleCoinStore) SetMigrationVersion(
err = txn.Set(migrationKey(), []byte{0x02, 0x00, 0x01, 0x04})
if err != nil {
panic(err)
p.logger.Panic("failed to set migration version", zap.Error(err))
}
err = txn.Set(genesisSeedKey(), seed)
if err != nil {
panic(err)
p.logger.Panic("failed to set genesis seed", zap.Error(err))
}
return txn.Commit()
@ -470,7 +470,7 @@ func (p *PebbleCoinStore) internalMigrate(
),
)
if err != nil {
panic(err)
p.logger.Panic("failed to delete range", zap.Error(err))
}
err = p.db.DeleteRange(
coinKey(
@ -481,7 +481,7 @@ func (p *PebbleCoinStore) internalMigrate(
),
)
if err != nil {
panic(err)
p.logger.Panic("failed to delete range", zap.Error(err))
}
err = p.db.DeleteRange(
proofByOwnerKey(
@ -494,7 +494,7 @@ func (p *PebbleCoinStore) internalMigrate(
),
)
if err != nil {
panic(err)
p.logger.Panic("failed to delete range", zap.Error(err))
}
err = p.db.DeleteRange(
proofKey(
@ -505,13 +505,13 @@ func (p *PebbleCoinStore) internalMigrate(
),
)
if err != nil {
panic(err)
p.logger.Panic("failed to delete range", zap.Error(err))
}
if err := p.db.Delete(clockDataEarliestIndex(filter)); err != nil {
panic(err)
p.logger.Panic("failed to delete range", zap.Error(err))
}
if err := p.db.Delete(clockDataLatestIndex(filter)); err != nil {
panic(err)
p.logger.Panic("failed to delete range", zap.Error(err))
}
txn, err := p.NewTransaction(false)
@ -521,12 +521,12 @@ func (p *PebbleCoinStore) internalMigrate(
err = txn.Set(migrationKey(), []byte{0x02, 0x00, 0x01, 0x04})
if err != nil {
panic(err)
p.logger.Panic("failed to set migration version", zap.Error(err))
}
err = txn.Set(genesisSeedKey(), genesisSeed)
if err != nil {
panic(err)
p.logger.Panic("failed to set genesis seed", zap.Error(err))
}
return txn.Commit()
@ -564,13 +564,13 @@ func (p *PebbleCoinStore) Migrate(filter []byte, genesisSeedHex string) error {
err = txn.Set(migrationKey(), []byte{0x02, 0x00, 0x01, 0x04})
if err != nil {
panic(err)
p.logger.Panic("failed to set migration version", zap.Error(err))
}
return txn.Commit()
} else {
defer closer.Close()
if len(status) == 4 && bytes.Compare(status, []byte{0x02, 0x00, 0x01, 0x04}) > 0 {
panic("database has been migrated to a newer version, do not rollback")
p.logger.Panic("database has been migrated to a newer version, do not rollback")
} else if len(status) == 3 || bytes.Compare(status, []byte{0x02, 0x00, 0x01, 0x04}) < 0 {
return p.internalMigrate(filter, seed)
}

View File

@ -2,7 +2,6 @@ package store
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/cockroachdb/pebble"
@ -11,6 +10,7 @@ import (
"golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type DataProofStore interface {
@ -243,13 +243,14 @@ func internalListAggregateProofKeys(
commitment []byte,
frameNumber uint64,
) ([][]byte, [][]byte, [][]byte, error) {
logger := utils.GetLogger()
proofs := [][]byte{dataProofMetadataKey(filter, commitment)}
commits := [][]byte{}
data := [][]byte{}
value, closer, err := db.Get(dataProofMetadataKey(filter, commitment))
if err != nil {
fmt.Println("proof lookup failed")
logger.Error("proof lookup failed", zap.Error(err))
if errors.Is(err, pebble.ErrNotFound) {
return nil, nil, nil, ErrNotFound
@ -268,7 +269,7 @@ func internalListAggregateProofKeys(
dataProofInclusionKey(filter, commitment, limit+1),
)
if err != nil {
fmt.Println("inclusion lookup failed")
logger.Error("inclusion lookup failed", zap.Error(err))
return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
}
@ -606,7 +607,7 @@ func (p *PebbleDataProofStore) RewindToIncrement(
for j := uint32(0); j <= increment; j++ {
_, parallelism, _, _, err := p.GetDataTimeProof(peerId, uint32(j))
if err != nil {
panic(err)
p.logger.Panic("Failed to get data time proof", zap.Error(err))
}
pomwBasis := big.NewInt(1200000)

View File

@ -17,6 +17,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/hypergraph/application"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type HypergraphStore interface {
@ -1118,7 +1119,7 @@ func (p *PebbleHypergraphStore) GetNodeByPath(
func generateSlices(fullpref, pref []int) [][]int {
result := [][]int{}
if len(pref) > len(fullpref) {
panic("invalid prefix length")
utils.GetLogger().Panic("invalid prefix length")
}
for i := len(pref); i <= len(fullpref); i++ {
newSlice := make([]int, i)

View File

@ -5,7 +5,9 @@ import (
"github.com/cockroachdb/pebble"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
)
type PebbleDB struct {
@ -22,7 +24,7 @@ func NewPebbleDB(config *config.DBConfig) *PebbleDB {
}
db, err := pebble.Open(config.Path, opts)
if err != nil {
panic(err)
utils.GetLogger().Panic("failed to open pebble db", zap.Error(err))
}
return &PebbleDB{db}

24
node/utils/logging.go Normal file
View File

@ -0,0 +1,24 @@
package utils
import "go.uber.org/zap"
var logger *zap.Logger
var debugLogger *zap.Logger
// GetLogger returns the shared package-level production logger.
// The logger is built once in this package's init, so it is always
// non-nil and ready to use by the time any caller can reach it.
func GetLogger() *zap.Logger {
	return logger
}
// GetDebugLogger returns the shared package-level development logger
// (human-readable console encoding, debug level). Like GetLogger, it is
// initialized eagerly in init and is always non-nil.
func GetDebugLogger() *zap.Logger {
	return debugLogger
}
// init eagerly constructs both package-level loggers so GetLogger and
// GetDebugLogger always hand out ready-to-use instances.
func init() {
	// Production logger: spell out caller/stacktrace settings explicitly,
	// even though these are the config defaults, to document intent.
	prodCfg := zap.NewProductionConfig()
	prodCfg.DisableCaller = false
	prodCfg.DisableStacktrace = false
	logger = zap.Must(prodCfg.Build())

	// Development logger: stock development config, no overrides needed.
	debugLogger = zap.Must(zap.NewDevelopmentConfig().Build())
}

View File

@ -3,14 +3,17 @@
package utils
import "golang.org/x/sys/unix"
import (
"go.uber.org/zap"
"golang.org/x/sys/unix"
)
func GetDiskSpace(dir string) uint64 {
var stat unix.Statfs_t
err := unix.Statfs(dir, &stat)
if err != nil {
panic(err)
GetLogger().Panic("failed statfs", zap.Error(err))
}
return stat.Bavail * uint64(stat.Bsize)

View File

@ -3,7 +3,10 @@
package utils
import "golang.org/x/sys/windows"
import (
"go.uber.org/zap"
"golang.org/x/sys/windows"
)
func GetDiskSpace(dir string) uint64 {
var freeBytesAvailable uint64
@ -13,7 +16,7 @@ func GetDiskSpace(dir string) uint64 {
err := windows.GetDiskFreeSpaceEx(windows.StringToUTF16Ptr(dir),
&freeBytesAvailable, &totalNumberOfBytes, &totalNumberOfFreeBytes)
if err != nil {
panic(err)
GetLogger().Panic("failed GetDiskFreeSpaceEx", zap.Error(err))
}
return totalNumberOfBytes