ceremonyclient/node/main.go
//go:build !js && !wasm

package main

import (
	"encoding/binary"
	"encoding/hex"
	"flag"
	"fmt"
	"io/fs"
	"math/big"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pbnjay/memory"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	"golang.org/x/crypto/sha3"
	"google.golang.org/protobuf/proto"

	"source.quilibrium.com/quilibrium/monorepo/node/app"
	"source.quilibrium.com/quilibrium/monorepo/node/config"
	qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
	"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
	"source.quilibrium.com/quilibrium/monorepo/node/store"
	"source.quilibrium.com/quilibrium/monorepo/node/utils"
)

var (
	configDirectory = flag.String(
		"config",
		filepath.Join(".", ".config"),
		"the configuration directory",
	)
	emergencyRepair = flag.Bool(
		"emergency-repair",
		false,
		"performs an attempt at emergency repair. extremely dangerous, take a backup of your store before running.",
	)
)

func main() {
	flag.Parse()

	nodeConfig, err := config.LoadConfig(*configDirectory, "", false)
	if err != nil {
		panic(err)
	}

	if *emergencyRepair {
		fmt.Println("Emergency Repair Mode")
		fmt.Println("WARNING")
		fmt.Println("WARNING")
		fmt.Println("WARNING")
		fmt.Println(
			"This operation will attempt to repair your 1.4.21.1 store. It is " +
				"not guaranteed to work, and may make things worse. Before you run " +
				"this, please take a backup of your store. Proofs generated by this " +
				"repair tool will evaluate at single core, and earn less QUIL for " +
				"the proofs produced than you would have previously earned with a " +
				"valid backup. Do you wish to proceed?",
		)
		fmt.Println("WARNING")
		fmt.Println("WARNING")
		fmt.Println("WARNING")
		fmt.Printf("Proceed? (Y/N): ")

		var response string
		_, err := fmt.Scanln(&response)
		if err != nil {
			fmt.Println("Invalid response, exiting without running repair.")
			os.Exit(1)
		}

		response = strings.ToUpper(strings.TrimSpace(response))
		if response == "Y" || response == "YES" {
			runEmergencyRepair(nodeConfig)
		} else {
			fmt.Println(
				"Did not receive confirmation, exiting without running repair.",
			)
			os.Exit(0)
		}
	}

	fmt.Println(
		"This is not a normal node distribution; it is a repair tool. To run " +
			"the repair, use --emergency-repair.",
	)
}
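
// check probes the data proof store for the proof at increment i and reports
// whether that increment needs repair. A missing record (store.ErrNotFound)
// is the expected gap case; any other read error is reported as
// uncorrectable, and the increment is flagged for repair in either case. The
// deferred recover guards against panics from partially written records.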
func check(pstore store.DataProofStore, peerId []byte, i uint32) bool {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println(err)
		}
	}()

	fmt.Println("Checking increment", i)
	_, _, _, _, err := pstore.GetDataTimeProof(peerId, i)
	if err != nil {
		if !errors.Is(err, store.ErrNotFound) {
			fmt.Println("Uncorrectable error detected:", err)
		}
		fmt.Println("Missing record at increment", i, "- adding to repair set")
		return true
	}
	return false
}
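
// runEmergencyRepair scans the local data proof store for missing increments
// and regenerates each gap's proof from its predecessor. Repaired proofs are
// recomputed at parallelism 1 (single core), matching the warning shown in
// main. The process exits on any uncorrectable error.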
func runEmergencyRepair(cfg *config.Config) {
	fmt.Println("Starting emergency repair.")
	kzg.Init()

	fmt.Println(
		"Opening pebble database. If you see an invalid chunk error, your " +
			"database is corrupted beyond the abilities of this tool to repair.",
	)
	db := store.NewPebbleDB(cfg.DB)
	defer db.Close()

	fmt.Println("Scanning for gaps in record...")
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}

	pstore := store.NewPebbleDataProofStore(db, logger)
	peerId := getPeerID(cfg.P2P)
	increment, _, _, err := pstore.GetLatestDataTimeProof([]byte(peerId))
	if err != nil {
		fmt.Println(
			"Could not find latest proof. Please ensure you are using the correct " +
				"config.yml and that the path to the store in the config is correct. " +
				"(Hint: try an absolute path for the store.)",
		)
		os.Exit(1)
	}

	fmt.Println(
		"Latest proof found, increment:", increment, "- iterating to find gaps...",
	)

	// Record the increment preceding each gap. Note that if increment 0
	// itself is missing, i-1 wraps around to 0xFFFFFFFF, which is handled as
	// a sentinel in the repair loop below.
	gapStarts := []uint32{}
	for i := uint32(0); i < increment; i++ {
		if check(pstore, []byte(peerId), i) {
			gapStarts = append(gapStarts, i-1)
		}
	}
	if len(gapStarts) == 0 {
		fmt.Println("No gaps found, quitting.")
		os.Exit(0)
	}

	kprover := qcrypto.NewKZGInclusionProver(logger)
	wprover := qcrypto.NewWesolowskiFrameProver(logger)
	for _, gapPredecessor := range gapStarts {
		prevIndex := -1
		hashes := []byte{}
		previousCommitment := []byte{}
		proofs := [][]byte{}
		commitment := []byte{}

		_, _, _, previousOutput, err := pstore.GetDataTimeProof(
			[]byte(peerId),
			gapPredecessor,
		)
		if err != nil {
			if errors.Is(err, store.ErrNotFound) && len(gapStarts) > 1 &&
				gapPredecessor == uint32(0xFFFFFFFF) {
				fmt.Println(
					"Could not load predecessor data time proof, store is severely "+
						"corrupted. Please review the logs above. If you encounter this "+
						"scenario starting from increment 0 -",
					gapStarts[len(gapStarts)-1],
					"create a new 1.4.21.1 store, keeping this config.yml and "+
						"keys.yml, and run the node up to",
					gapStarts[len(gapStarts)-1],
				)
			}
			fmt.Println("Uncorrectable error detected:", err)
			os.Exit(1)
		}

		_, _, previousCommitment, _ = app.GetOutputs(previousOutput)
		fmt.Println(
			"Missing record at increment", gapPredecessor+1, "- repairing...",
		)

		// Recompute the Wesolowski VDF proof for the missing increment from
		// the peer id and the predecessor's commitment.
		input := []byte{}
		input = append(input, []byte(peerId)...)
		input = append(input, previousCommitment...)
		proof, _ := wprover.RecalculatePreDuskChallengeProof(
			input,
			0,
			gapPredecessor+1,
		)
		proofs = append(proofs, proof)

		hashes, commitment, prevIndex = performDataCommitment(
			kprover,
			proofs,
			1,
			uint64(128),
		)
		p, err := kprover.ProveRaw(
			hashes,
			0,
			uint64(128),
		)
		if err != nil {
			fmt.Println("Error while proving:", err, "- stopping")
			os.Exit(1)
		}

		output := serializeOutput(
			uint32(prevIndex),
			proofs,
			commitment,
			p,
		)

		txn, err := pstore.NewTransaction()
		if err != nil {
			fmt.Println("Error while preparing transaction:", err, "- stopping")
			os.Exit(1)
		}

		fmt.Println("Storing repaired proof, increment:", gapPredecessor+1)
		err = pstore.PutDataTimeProof(
			txn,
			1,
			[]byte(peerId),
			gapPredecessor+1,
			previousCommitment,
			output,
			true,
		)
		if err != nil {
			fmt.Println("Error while saving proof:", err, "- stopping")
			os.Exit(1)
		}
		if err := txn.Commit(); err != nil {
			fmt.Println("Error while committing transaction:", err, "- stopping")
			os.Exit(1)
		}
	}

	fmt.Println("Emergency repair completed successfully.")
	os.Exit(0)
}
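
// serializeOutput encodes a repaired proof record as:
//
//	previousIndex (4 bytes, big-endian) ||
//	previousOutputs[previousIndex]      ||
//	kzgCommitment                       ||
//	kzgProof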
func serializeOutput(
	previousIndex uint32,
	previousOutputs [][]byte,
	kzgCommitment []byte,
	kzgProof []byte,
) []byte {
	serializedOutput := []byte{}
	serializedOutput = binary.BigEndian.AppendUint32(
		serializedOutput,
		previousIndex,
	)
	serializedOutput = append(serializedOutput, previousOutputs[previousIndex]...)
	serializedOutput = append(serializedOutput, kzgCommitment...)
	serializedOutput = append(serializedOutput, kzgProof...)
	return serializedOutput
}
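
// performDataCommitment hashes each VDF proof with SHA3-512, commits to the
// concatenated digests with a raw KZG commitment of the given polynomial
// size, and derives a worker index by reducing the SHA3-256 hash of the
// commitment modulo parallelism. It returns the digests, the commitment,
// and the derived index.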
func performDataCommitment(
	kprover *qcrypto.KZGInclusionProver,
	proofs [][]byte,
	parallelism int,
	polySize uint64,
) ([]byte, []byte, int) {
	// Take the VDF outputs and generate some deterministic outputs to feed
	// into a KZG commitment:
	output := []byte{}
	for i := 0; i < len(proofs); i++ {
		h := sha3.Sum512(proofs[i])
		output = append(output, h[:]...)
	}

	nextInput, err := kprover.CommitRaw(output, polySize)
	if err != nil {
		panic(err)
	}

	inputHash := sha3.Sum256(nextInput)
	inputHashBI := big.NewInt(0).SetBytes(inputHash[:])
	prevIndex := int(inputHashBI.Mod(
		inputHashBI,
		big.NewInt(int64(parallelism)),
	).Int64())

	return output, nextInput, prevIndex
}
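
// dataWorkers holds the child processes spawned by spawnDataWorkers so that
// stopDataWorkers can signal them on shutdown.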
var dataWorkers []*exec.Cmd
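
// spawnDataWorkers launches one worker subprocess per available core (minus
// one reserved for the parent), re-executing this binary with --core and
// --parent-process flags appended to the original arguments. Each worker is
// restarted in a loop if it exits. If workers are configured via multiaddrs,
// they are assumed to be managed externally and nothing is spawned.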
func spawnDataWorkers(nodeConfig *config.Config) {
	if len(nodeConfig.Engine.DataWorkerMultiaddrs) != 0 {
		fmt.Println(
			"Data workers configured by multiaddr, be sure these are running...",
		)
		return
	}

	process, err := os.Executable()
	if err != nil {
		panic(err)
	}

	cores := runtime.GOMAXPROCS(0)
	dataWorkers = make([]*exec.Cmd, cores-1)
	fmt.Printf("Spawning %d data workers...\n", cores-1)

	for i := 1; i <= cores-1; i++ {
		i := i
		go func() {
			for {
				args := []string{
					fmt.Sprintf("--core=%d", i),
					fmt.Sprintf("--parent-process=%d", os.Getpid()),
				}
				args = append(args, os.Args[1:]...)
				cmd := exec.Command(process, args...)
				cmd.Stdout = os.Stdout
				cmd.Stderr = os.Stdout
				err := cmd.Start()
				if err != nil {
					panic(err)
				}

				dataWorkers[i-1] = cmd
				cmd.Wait()
				time.Sleep(25 * time.Millisecond)
				fmt.Printf("Data worker %d stopped, restarting...\n", i)
			}
		}()
	}
}
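
// stopDataWorkers sends os.Kill to every spawned worker process. Workers
// that cannot be killed are reported so the operator can terminate them
// manually.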
func stopDataWorkers() {
	for i := 0; i < len(dataWorkers); i++ {
		err := dataWorkers[i].Process.Signal(os.Kill)
		if err != nil {
			fmt.Printf(
				"fatal: unable to kill worker with pid %d, please kill this process!\n",
				dataWorkers[i].Process.Pid,
			)
		}
	}
}
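
// RunSelfTestIfNeeded builds a self-test report capturing the core count,
// total memory, available disk space, and protocol capabilities, then writes
// it to the SELF_TEST file in the config directory. It also creates the
// store directory if it does not yet exist.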
func RunSelfTestIfNeeded(
	configDir string,
	nodeConfig *config.Config,
) *protobufs.SelfTestReport {
	logger, _ := zap.NewProduction()

	cores := runtime.GOMAXPROCS(0)
	if len(nodeConfig.Engine.DataWorkerMultiaddrs) != 0 {
		cores = len(nodeConfig.Engine.DataWorkerMultiaddrs) + 1
	}

	totalMemory := memory.TotalMemory()

	d, _ := os.Stat(filepath.Join(configDir, "store"))
	if d == nil {
		err := os.Mkdir(filepath.Join(configDir, "store"), 0755)
		if err != nil {
			panic(err)
		}
	}

	report := &protobufs.SelfTestReport{}
	report.Cores = uint32(cores)
	report.Memory = binary.BigEndian.AppendUint64([]byte{}, totalMemory)
	disk := utils.GetDiskSpace(nodeConfig.DB.Path)
	report.Storage = binary.BigEndian.AppendUint64([]byte{}, disk)
	logger.Info("writing report")

	report.Capabilities = []*protobufs.Capability{
		{
			ProtocolIdentifier: 0x020000,
		},
	}

	reportBytes, err := proto.Marshal(report)
	if err != nil {
		panic(err)
	}

	err = os.WriteFile(
		filepath.Join(configDir, "SELF_TEST"),
		reportBytes,
		fs.FileMode(0600),
	)
	if err != nil {
		panic(err)
	}
	return report
}
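
// clearIfTestData removes the store directory when no RELEASE_VERSION marker
// exists (which appears to indicate data left over from a test build), then
// writes the release version bytes as the marker.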
func clearIfTestData(configDir string, nodeConfig *config.Config) {
	_, err := os.Stat(filepath.Join(configDir, "RELEASE_VERSION"))
	if os.IsNotExist(err) {
		fmt.Println("Clearing test data...")
		err := os.RemoveAll(nodeConfig.DB.Path)
		if err != nil {
			panic(err)
		}

		versionFile, err := os.OpenFile(
			filepath.Join(configDir, "RELEASE_VERSION"),
			os.O_CREATE|os.O_RDWR,
			fs.FileMode(0600),
		)
		if err != nil {
			panic(err)
		}

		// Version 1.0.0, encoded as one byte per component.
		_, err = versionFile.Write([]byte{0x01, 0x00, 0x00})
		if err != nil {
			panic(err)
		}

		err = versionFile.Close()
		if err != nil {
			panic(err)
		}
	}
}
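
// printBalance fetches the node's owned token balance over gRPC and prints
// it denominated in QUIL. The conversion factor 0x1DCD65000 is
// 8,000,000,000, the number of base units per whole QUIL.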
func printBalance(config *config.Config) {
	if config.ListenGRPCMultiaddr == "" {
		_, _ = fmt.Fprintf(os.Stderr, "gRPC Not Enabled, Please Configure\n")
		os.Exit(1)
	}

	conn, err := app.ConnectToNode(config)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := protobufs.NewNodeServiceClient(conn)
	balance, err := app.FetchTokenBalance(client)
	if err != nil {
		panic(err)
	}

	// 0x1DCD65000 = 8,000,000,000 base units per QUIL.
	conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
	r := new(big.Rat).SetFrac(balance.Owned, conversionFactor)
	fmt.Println("Owned balance:", r.FloatString(12), "QUIL")
	fmt.Println(
		"Note: bridged balance is not reflected here; you must bridge back to " +
			"QUIL to use it on mainnet.",
	)
}
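
// getPeerID derives the libp2p peer ID from the Ed448 private key configured
// in the p2p section of the config.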
func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
	peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
	if err != nil {
		panic(errors.Wrap(err, "error decoding peerkey"))
	}

	privKey, err := crypto.UnmarshalEd448PrivateKey(peerPrivKey)
	if err != nil {
		panic(errors.Wrap(err, "error unmarshaling peerkey"))
	}

	pub := privKey.GetPublic()
	id, err := peer.IDFromPublicKey(pub)
	if err != nil {
		panic(errors.Wrap(err, "error getting peer id"))
	}

	return id
}
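
// printPeerID prints the node's libp2p peer ID derived from the configured
// key.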
func printPeerID(p2pConfig *config.P2PConfig) {
	id := getPeerID(p2pConfig)
	fmt.Println("Peer ID: " + id.String())
}
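
// printNodeInfo prints the peer ID, then fetches and prints the version, max
// frame, peer score, and balance of the running node over gRPC.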
func printNodeInfo(cfg *config.Config) {
	if cfg.ListenGRPCMultiaddr == "" {
		_, _ = fmt.Fprintf(os.Stderr, "gRPC Not Enabled, Please Configure\n")
		os.Exit(1)
	}

	printPeerID(cfg.P2P)

	conn, err := app.ConnectToNode(cfg)
	if err != nil {
		fmt.Println("Could not connect to node. If it is still booting, please wait.")
		os.Exit(1)
	}
	defer conn.Close()

	client := protobufs.NewNodeServiceClient(conn)
	nodeInfo, err := app.FetchNodeInfo(client)
	if err != nil {
		panic(err)
	}

	fmt.Println("Version: " + config.FormatVersion(nodeInfo.Version))
	fmt.Println("Max Frame: " + strconv.FormatUint(nodeInfo.GetMaxFrame(), 10))
	fmt.Println("Peer Score: " + strconv.FormatUint(nodeInfo.GetPeerScore(), 10))
	printBalance(cfg)
}