* roll up v2.0.1-b2 to develop

* b2-fixed

* adjust return data of fast sync so it doesn't return the earliest frame

* -b3

* fix: announce peer based on leading frame, not initial frame; fix: looping bug

* fix: last batch fails due to underflow; qol: make logging chattier

* -b4

* resolve frame cache issue

* fix: mint loop + re-migrate

* fix: register execution panic

* fix: mint loop, other side

* fix: handle unexpected return of nil status

* final -b4

* handle subtle change to migration

* qol: add heuristic to handle corruption scenario

* bump genesis

* qol: use separate channel for worker

* final parameterization, parallelize streams

* Add direct peers to blossomsub (#309)

Co-authored-by: Tyler Sturos <tyler.john@qcommander.sh>

* chore(docker): add ca-certificates to fix x509 error. (#307)

* Update qcommander.sh bootstrap (#304)

* chore(docker): add ca-certificates to fix x509 error.

---------

Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>

* deprecate signers 10, 11, 14, 17

* adjust signatory check size to match rotated out signers

* qol: sync by rebroadcast

* upgrade version

* more small adjustments

* wait a little longer

* fix: don't use iterator for frame directly until iterator is fixed

* change iterator, genesis for testnet

* adjust to previous sync handling

* adjust: don't grab the very latest while it's already being broadcasted

* ok, ready for testnet

* handle rebroadcast quirks

* more adjustments from testing

* faster

* temporarily bulk process on frame candidates

* resolve separate frames

* don't loop

* make worker reset resume to check where it should continue

* move window

* reduce signature count now that supermajority signed last

* resolve bottlenecks

* remove GOMAXPROCS limit for now

* revisions for v2.0.2.1

* bump version

* bulk import

* reintroduce sync

* small adjustments to make life better

* check bitmask for peers and keep alive

* adjust reconnect

* ensure peer doesn't fall off address list

* adjust blossomsub to background discovery

* bump version

* remove dev check

* remove debug log line

* further adjustments

* a little more logic around connection management

* v2.0.2.3

* Fix peer discovery (#319)

* Fix peer discovery

* Make peer discovery connections parallel

* Monitor peers via pings (#317)

* Support QUILIBRIUM_SIGNATURE_CHECK in client (#314)

* Ensure direct peers are not pruned by resource limits (#315)

* Support pprof profiling via HTTP (#313)

* Fix CPU profiling

* Add pprof server support

* Additional peering connection improvements (#320)

* Lookup peers if not enough external peers are available

* Make bootstrap peer discovery sensitive to a lack of bootstrappers

---------

Co-authored-by: Tyler Sturos <55340199+tjsturos@users.noreply.github.com>
Co-authored-by: Tyler Sturos <tyler.john@qcommander.sh>
Co-authored-by: linquanisaac <33619994+linquanisaac@users.noreply.github.com>
Co-authored-by: petricadaipegsp <155911522+petricadaipegsp@users.noreply.github.com>
This commit is contained in:
Cassandra Heart 2024-10-31 16:43:49 -05:00 committed by GitHub
parent 5d52ab5de0
commit b0cf294c99
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
29 changed files with 2380 additions and 1142 deletions

View File

@ -71,6 +71,8 @@ LABEL org.opencontainers.image.source=$GIT_REPO
LABEL org.opencontainers.image.ref.name=$GIT_BRANCH
LABEL org.opencontainers.image.revision=$GIT_COMMIT
RUN apt-get update && apt-get install -y ca-certificates
COPY --from=build /go/bin/node /usr/local/bin
COPY --from=build /go/bin/grpcurl /usr/local/bin
COPY --from=build /opt/ceremonyclient/client/qclient /usr/local/bin

View File

@ -6,13 +6,19 @@ import (
"encoding/binary"
"fmt"
"os"
gotime "time"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
"google.golang.org/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
@ -27,12 +33,6 @@ var allCmd = &cobra.Command{
os.Exit(1)
}
conn, err := GetGRPCClient()
if err != nil {
panic(err)
}
defer conn.Close()
if !LightNode {
fmt.Println(
"mint all cannot be run unless node is not running. ensure your node " +
@ -41,8 +41,6 @@ var allCmd = &cobra.Command{
os.Exit(1)
}
client := protobufs.NewNodeServiceClient(conn)
db := store.NewPebbleDB(NodeConfig.DB)
logger, _ := zap.NewProduction()
dataProofStore := store.NewPebbleDataProofStore(db, logger)
@ -57,137 +55,213 @@ var allCmd = &cobra.Command{
panic(err)
}
pubSub := p2p.NewBlossomSub(NodeConfig.P2P, logger)
logger.Info("connecting to network")
time.Sleep(5 * time.Second)
increment, _, _, err := dataProofStore.GetLatestDataTimeProof(
[]byte(peerId),
)
if err != nil {
if errors.Is(err, store.ErrNotFound) {
logger.Info("could not find pre-2.0 proofs")
return
}
addr, err := poseidon.HashBytes([]byte(peerId))
panic(err)
}
addrBI, err := poseidon.HashBytes([]byte(peerId))
if err != nil {
panic(err)
}
addr := addrBI.FillBytes(make([]byte, 32))
genesis := config.GetGenesis()
bpub, err := crypto.UnmarshalEd448PublicKey(genesis.Beacon)
if err != nil {
panic(err)
}
resp, err := client.GetPreCoinProofsByAccount(
context.Background(),
&protobufs.GetPreCoinProofsByAccountRequest{
Address: addr.FillBytes(make([]byte, 32)),
},
)
bpeerId, err := peer.IDFromPublicKey(bpub)
if err != nil {
panic(err)
panic(errors.Wrap(err, "error getting peer id"))
}
resume := make([]byte, 32)
for _, pr := range resp.Proofs {
if pr.IndexProof != nil {
resume, err = token.GetAddressOfPreCoinProof(pr)
if err != nil {
panic(err)
}
increment = pr.Difficulty - 1
}
}
if increment == 0 && !bytes.Equal(resume, make([]byte, 32)) {
fmt.Println("already completed pre-midnight mint")
return
}
proofs := [][]byte{
[]byte("pre-dusk"),
resume,
}
batchCount := 0
for i := int(increment); i >= 0; i-- {
_, parallelism, input, output, err := dataProofStore.GetDataTimeProof(
[]byte(peerId),
uint32(i),
cc, err := pubSub.GetDirectChannel([]byte(bpeerId), "worker")
if err != nil {
logger.Info(
"could not establish direct channel, waiting...",
zap.Error(err),
)
if err == nil {
p := []byte{}
p = binary.BigEndian.AppendUint32(p, uint32(i))
p = binary.BigEndian.AppendUint32(p, parallelism)
p = binary.BigEndian.AppendUint64(p, uint64(len(input)))
p = append(p, input...)
p = binary.BigEndian.AppendUint64(p, uint64(len(output)))
p = append(p, output...)
proofs = append(proofs, p)
} else {
fmt.Println("could not find data time proof for peer and increment, stopping at increment", i)
panic(err)
time.Sleep(10 * time.Second)
}
for {
if cc == nil {
cc, err = pubSub.GetDirectChannel([]byte(bpeerId), "worker")
if err != nil {
logger.Info(
"could not establish direct channel, waiting...",
zap.Error(err),
)
cc = nil
time.Sleep(10 * time.Second)
continue
}
}
batchCount++
if batchCount == 200 || i == 0 {
fmt.Println("publishing proof batch, increment", i)
payload := []byte("mint")
for _, i := range proofs {
payload = append(payload, i...)
}
sig, err := privKey.Sign(payload)
if err != nil {
panic(err)
client := protobufs.NewDataServiceClient(cc)
if bytes.Equal(resume, make([]byte, 32)) {
status, err := client.GetPreMidnightMintStatus(
context.Background(),
&protobufs.PreMidnightMintStatusRequest{
Owner: addr,
},
grpc.MaxCallSendMsgSize(1*1024*1024),
grpc.MaxCallRecvMsgSize(1*1024*1024),
)
if err != nil || status == nil {
logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
cc.Close()
cc = nil
err = pubSub.Reconnect([]byte(peerId))
if err != nil {
logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
}
continue
}
_, err = client.SendMessage(
context.Background(),
&protobufs.TokenRequest{
Request: &protobufs.TokenRequest_Mint{
Mint: &protobufs.MintCoinRequest{
Proofs: proofs,
Signature: &protobufs.Ed448Signature{
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: pub,
},
Signature: sig,
resume = status.Address
if status.Increment != 0 {
increment = status.Increment - 1
} else if !bytes.Equal(status.Address, make([]byte, 32)) {
increment = 0
}
}
proofs := [][]byte{
[]byte("pre-dusk"),
resume,
}
batchCount := 0
// the cast is important, it underflows without:
for i := int(increment); i >= 0; i-- {
_, parallelism, input, output, err := dataProofStore.GetDataTimeProof(
[]byte(peerId),
uint32(i),
)
if err == nil {
p := []byte{}
p = binary.BigEndian.AppendUint32(p, uint32(i))
p = binary.BigEndian.AppendUint32(p, parallelism)
p = binary.BigEndian.AppendUint64(p, uint64(len(input)))
p = append(p, input...)
p = binary.BigEndian.AppendUint64(p, uint64(len(output)))
p = append(p, output...)
proofs = append(proofs, p)
} else {
logger.Error(
"could not find data time proof for peer and increment, stopping worker",
zap.String("peer_id", peerId.String()),
zap.Int("increment", i),
)
cc.Close()
cc = nil
return
}
batchCount++
if batchCount == 200 || i == 0 {
logger.Info("publishing proof batch", zap.Int("increment", i))
payload := []byte("mint")
for _, i := range proofs {
payload = append(payload, i...)
}
sig, err := pubSub.SignMessage(payload)
if err != nil {
cc.Close()
panic(err)
}
resp, err := client.HandlePreMidnightMint(
context.Background(),
&protobufs.MintCoinRequest{
Proofs: proofs,
Signature: &protobufs.Ed448Signature{
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: pub,
},
Signature: sig,
},
},
},
)
if err != nil {
panic(err)
}
waitForConf:
for {
gotime.Sleep(20 * gotime.Second)
resp, err := client.GetPreCoinProofsByAccount(
context.Background(),
&protobufs.GetPreCoinProofsByAccountRequest{
Address: addr.FillBytes(make([]byte, 32)),
},
grpc.MaxCallSendMsgSize(1*1024*1024),
grpc.MaxCallRecvMsgSize(1*1024*1024),
)
if err != nil {
for _, pr := range resp.Proofs {
if pr.IndexProof != nil {
newResume, err := token.GetAddressOfPreCoinProof(pr)
if err != nil {
panic(err)
}
if bytes.Equal(newResume, resume) {
fmt.Println("waiting for confirmation...")
continue waitForConf
}
}
}
}
break
}
batchCount = 0
proofs = [][]byte{
[]byte("pre-dusk"),
resume,
}
if i == 0 {
fmt.Println("all proofs submitted, returning")
return
if err != nil {
if strings.Contains(
err.Error(),
application.ErrInvalidStateTransition.Error(),
) && i == 0 {
resume = make([]byte, 32)
logger.Info("pre-midnight proofs submitted, returning")
cc.Close()
cc = nil
return
}
logger.Error(
"got error response, waiting...",
zap.Error(err),
)
resume = make([]byte, 32)
cc.Close()
cc = nil
time.Sleep(10 * time.Second)
err = pubSub.Reconnect([]byte(peerId))
if err != nil {
logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
}
break
}
resume = resp.Address
batchCount = 0
proofs = [][]byte{
[]byte("pre-dusk"),
resume,
}
if i == 0 {
logger.Info("pre-midnight proofs submitted, returning")
cc.Close()
cc = nil
return
} else {
increment = uint32(i) - 1
}
break
}
}
}

View File

@ -22,6 +22,16 @@ var balanceCmd = &cobra.Command{
client := protobufs.NewNodeServiceClient(conn)
peerId := GetPeerIDFromConfig(NodeConfig)
privKey, err := GetPrivKeyFromConfig(NodeConfig)
if err != nil {
panic(err)
}
pub, err := privKey.GetPublic().Raw()
if err != nil {
panic(err)
}
addr, err := poseidon.HashBytes([]byte(peerId))
if err != nil {
panic(err)
@ -38,16 +48,42 @@ var balanceCmd = &cobra.Command{
panic(err)
}
if info.OwnedTokens == nil {
panic("invalid response from RPC")
}
tokens := new(big.Int).SetBytes(info.OwnedTokens)
conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
r := new(big.Rat).SetFrac(tokens, conversionFactor)
altAddr, err := poseidon.HashBytes([]byte(pub))
if err != nil {
panic(err)
}
altAddrBytes := altAddr.FillBytes(make([]byte, 32))
info, err = client.GetTokenInfo(
context.Background(),
&protobufs.GetTokenInfoRequest{
Address: altAddrBytes,
},
)
if err != nil {
panic(err)
}
if info.OwnedTokens == nil {
panic("invalid response from RPC")
}
tokens = new(big.Int).SetBytes(info.OwnedTokens)
r2 := new(big.Rat).SetFrac(tokens, conversionFactor)
fmt.Println("Total balance:", r.FloatString(12), fmt.Sprintf(
"QUIL (Account 0x%x)",
addrBytes,
))
if r2.Cmp(big.NewRat(0, 1)) != 0 {
fmt.Println("Total balance:", r2.FloatString(12), fmt.Sprintf(
"QUIL (Account 0x%x)",
altAddrBytes,
))
}
},
}

View File

@ -22,6 +22,16 @@ var coinsCmd = &cobra.Command{
client := protobufs.NewNodeServiceClient(conn)
peerId := GetPeerIDFromConfig(NodeConfig)
privKey, err := GetPrivKeyFromConfig(NodeConfig)
if err != nil {
panic(err)
}
pub, err := privKey.GetPublic().Raw()
if err != nil {
panic(err)
}
addr, err := poseidon.HashBytes([]byte(peerId))
if err != nil {
panic(err)
@ -42,6 +52,26 @@ var coinsCmd = &cobra.Command{
panic("invalid response from RPC")
}
altAddr, err := poseidon.HashBytes([]byte(pub))
if err != nil {
panic(err)
}
altAddrBytes := altAddr.FillBytes(make([]byte, 32))
resp2, err := client.GetTokensByAccount(
context.Background(),
&protobufs.GetTokensByAccountRequest{
Address: altAddrBytes,
},
)
if err != nil {
panic(err)
}
if len(resp.Coins) != len(resp.FrameNumbers) {
panic("invalid response from RPC")
}
for i, coin := range resp.Coins {
amount := new(big.Int).SetBytes(coin.Amount)
conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
@ -51,6 +81,15 @@ var coinsCmd = &cobra.Command{
fmt.Sprintf("QUIL (Coin 0x%x)", resp.Addresses[i]),
)
}
for i, coin := range resp2.Coins {
amount := new(big.Int).SetBytes(coin.Amount)
conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
r := new(big.Rat).SetFrac(amount, conversionFactor)
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp.Addresses[i]),
)
}
},
}

View File

@ -6,6 +6,7 @@ import (
"encoding/hex"
"fmt"
"os"
"strconv"
"strings"
"github.com/cloudflare/circl/sign/ed448"
@ -150,6 +151,20 @@ func GetGRPCClient() (*grpc.ClientConn, error) {
)
}
func signatureCheckDefault() bool {
envVarValue, envVarExists := os.LookupEnv("QUILIBRIUM_SIGNATURE_CHECK")
if envVarExists {
def, err := strconv.ParseBool(envVarValue)
if err == nil {
return def
} else {
fmt.Println("Invalid environment variable QUILIBRIUM_SIGNATURE_CHECK, must be 'true' or 'false'. Got: " + envVarValue)
}
}
return true
}
func init() {
rootCmd.PersistentFlags().StringVar(
&configDirectory,
@ -166,7 +181,7 @@ func init() {
rootCmd.PersistentFlags().BoolVar(
&signatureCheck,
"signature-check",
true,
"bypass signature check (not recommended for binaries)",
signatureCheckDefault(),
"bypass signature check (not recommended for binaries) (default true or value of QUILIBRIUM_SIGNATURE_CHECK env var)",
)
}

View File

@ -4,6 +4,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
@ -81,6 +82,7 @@ var BootstrapPeers = []string{
"/ip4/148.251.9.90/udp/8336/quic-v1/p2p/QmRpKmQ1W83s6moBFpG6D6nrttkqdQSbdCJpvfxDVGcs38",
"/ip4/15.235.211.121/udp/8336/quic-v1/p2p/QmZHNLUSAFCkTwHiEE3vWay3wsus5fWYsNLFTFU6tPCmNR",
"/ip4/63.141.228.58/udp/8336/quic-v1/p2p/QmezARggdWKa1sw3LqE3LfZwVvtuCpXpK8WVo8EEdfakJV",
"/ip4/185.209.178.191/udp/8336/quic-v1/p2p/QmcKQjpQmLpbDsiif2MuakhHFyxWvqYauPsJDaXnLav7PJ",
// purged peers (keep your node online to return to this list)
// "/ip4/204.186.74.47/udp/8317/quic-v1/p2p/Qmd233pLUDvcDW3ama27usfbG1HxKNh1V9dmWVW1SXp1pd",
// "/ip4/186.233.184.181/udp/8336/quic-v1/p2p/QmW6QDvKuYqJYYMP5tMZSp12X3nexywK28tZNgqtqNpEDL",
@ -99,8 +101,8 @@ var BootstrapPeers = []string{
}
type Signature struct {
PublicKeyHex string `json:"publicKeyHex"`
SignatureHex string `json:"signatureHex"`
PublicKeyHex string `json:PublicKeyHex`
SignatureHex string `json:SignatureHex`
}
type SignedGenesisUnlock struct {
@ -135,7 +137,7 @@ var unlock *SignedGenesisUnlock
func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) {
if network != 0 {
unlock = &SignedGenesisUnlock{
GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c000000000000000000000004",
GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c000000000000000000000005",
Beacon: []byte{
0x58, 0xef, 0xd9, 0x7e, 0xdd, 0x0e, 0xb6, 0x2f,
0x51, 0xc7, 0x5d, 0x00, 0x29, 0x12, 0x45, 0x49,
@ -147,6 +149,51 @@ func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) {
0x80,
},
}
} else {
// From https://releases.quilibrium.com/genesisunlock, skip a download:
beacon, _ := base64.StdEncoding.DecodeString("ImqaBAzHM61pHODoywHu2a6FIOqoXKY/RECZuOXjDfds8DBxtA0g+4hCfOgwiti2TpOF8AH7xH0A")
unlock = &SignedGenesisUnlock{
GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c083fb0a4274b1f70e9aa2b3f",
Signatures: []Signature{
{
PublicKeyHex: "b1214da7f355f5a9edb7bcc23d403bdf789f070cca10db2b4cadc22f2d837afb650944853e35d5f42ef3c4105b802b144b4077d5d3253e4100",
SignatureHex: "5176d46e7974cb0d37eb16864fa01ed4d10222ffd38009e451a6339af0ae4938d95dad7af5db7ea9a3bc818cf4dee8e20f9a3be6717d45aa80c0b8bf9783bc5129a7efb0cd900b2a56d84f16d2c531e16a4c4456a37ebed68b95dff3d5b910705aa3963989a92e8908d8eb58622d47bb0c00",
},
{
PublicKeyHex: "de4cfe7083104bfe32f0d4082fa0200464d8b10804a811653eedda376efcad64dd222f0f0ceb0b8ae58abe830d7a7e3f3b2d79d691318daa00",
SignatureHex: "6f6fb897e54787d716697b54bb18eab857857114d30ca3abe7949d1d1502662a4b181942a207d7ebb144ebd56b0eb83b7860eddf85d51bcd0065d1429006a5840dad464d21d0ac0293bec6ec0ea9f7b38c48e9979febaa36e51101f8a263d1e7666d3cc23746626168d2ad2c817b36f00a00",
},
{
PublicKeyHex: "540237a35e124882d6b64e7bb5718273fa338e553f772b77fe90570e45303762b34131bdcb6c0b9f2cf9e393d9c7e0f546eeab0bcbbd881680",
SignatureHex: "2ef74fb5222ca8053543b6f62aa89a728fb316c17154c191a27fc50d9923ca55bf469c32134df667a142e28ef563205e72fcfcc0afed3ff50032975bee3f6f2b8f14b90a3693d065075880f0e42755de2828882f5245840edb71083fc8620f041ed44da8515b03360ea6d78715c189f71300",
},
{
PublicKeyHex: "fbe4166e37f93f90d2ebf06305315ae11b37e501d09596f8bde11ba9d343034fbca80f252205aa2f582a512a72ad293df371baa582da072900",
SignatureHex: "15b25055d570d8a6a1caab8e266995609fc7489045f216871a37201c85515c341c1dbf3f0537ff9436579858ee38c4741dce9e00b4c1ddf180cb592cc73ef6ba6e9374d8a8937fac84ad76a66b528164db9a8de48a11a15557f296f075f729617afe9ca17552f1a8f6dd2c1bb151f2930e00",
},
{
PublicKeyHex: "45170b626884b85d61ae109f2aa9b0e1ecc18b181508431ea6308f3869f2adae49da9799a0a594eaa4ef3ad492518fb1729decd44169d40d00",
SignatureHex: "4af69e871617eee5ba8b51d73190500bc064ec77e7e396e4d8bca1942dfb538007b1f1ac65787d57f3406e54279d3d360f465723eaf58a8e002cfd54fe78c2c8799cb71a37ea13debd32b868005ff61eea9946b063fa25407929dc445e99b58786e3fe01749208e2a2e367640d9a66130100",
},
{
PublicKeyHex: "001a4cbfce5d9aeb7e20665b0d236721b228a32f0baee62ffa77f45b82ecaf577e8a38b7ef91fcf7d2d2d2b504f085461398d30b24abb1d700",
SignatureHex: "966f12f5b59f9ac18e15608f638938c137017c9a68f5419de8560f6aedffd454b0dbd6326719a37c84b1e56795933f1584e156145f8814970000554d97e98156c1489b95a1cd196391f71f13d4958eaa66054399c710fe32c4e6cb3214c1f2126f3d44a3402247209cf32bf17b5806d63700",
},
{
PublicKeyHex: "61628beef8f6964466fd078d6a2b90a397ab0777a14b9728227fd19f36752f9451b1a8d780740a0b9a8ce3df5f89ca7b9ff17de9274a270980",
SignatureHex: "9521933c79b269d33f38ca45f65f02555ae2126e0c378f40ccbf6edc16680035098104caf34a91733043297b44870a739af2ce23a035ffa080b394d438eb781d69167966b7aec1ba2194cda276dfdcf25158d4795f863d779a28c3fd7858ba3b9d3af6c69d91e5609c1b3a28101697500f00",
},
{
PublicKeyHex: "81d63a45f068629f568de812f18be5807bfe828a830097f09cf02330d6acd35e3607401df3fda08b03b68ea6e68afd506b23506b11e87a0f80",
SignatureHex: "3ebba8c10d2e188ce8e7138d2189dac51a3854c9706849f28c7f60a264951cc5b88534793e5a25b540bb2cb736da5c0b97040ed904d79afe8061e4ad334b16b89a3e29c1c26f6062fc6db146a00f9b7da76ee237004f60bca6e32f452d9074b4c07402092a62cb2596c2eab96d80454c0000",
},
{
PublicKeyHex: "6e2872f73c4868c4286bef7bfe2f5479a41c42f4e07505efa4883c7950c740252e0eea78eef10c584b19b1dcda01f7767d3135d07c33244100",
SignatureHex: "5701f5cd907a105d0421d2d6d49b147410211e297ef1bc7b8040ec96c742d1b628523cda378ebb57e37bf6a9b6d23bf196a75dc1c461d5b5809be734030c41e577854641b103fe394524439e2c538458bdd4b5490176bf35cac03eb90dfd9b54ff87e46f0da4b7fd2057394922c448eb1c00",
},
},
Beacon: beacon,
}
}
if unlock != nil {
return unlock, nil

View File

@ -39,4 +39,5 @@ type P2PConfig struct {
Network uint8 `yaml:"network"`
LowWatermarkConnections uint `yaml:"lowWatermarkConnections"`
HighWatermarkConnections uint `yaml:"highWatermarkConnections"`
DirectPeers []string `yaml:"directPeers"`
}

View File

@ -6,15 +6,15 @@ import (
)
func GetMinimumVersionCutoff() time.Time {
return time.Date(2024, time.October, 12, 11, 0, 0, 0, time.UTC)
return time.Date(2024, time.October, 24, 11, 0, 0, 0, time.UTC)
}
func GetMinimumVersion() []byte {
return []byte{0x02, 0x00, 0x01}
return []byte{0x02, 0x00, 0x02}
}
func GetVersion() []byte {
return []byte{0x02, 0x00, 0x01}
return []byte{0x02, 0x00, 0x02}
}
func GetVersionString() string {
@ -36,9 +36,9 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x00
return 0x03
}
func GetRCNumber() byte {
return 0x04
return 0x00
}

View File

@ -32,55 +32,100 @@ func (e *DataClockConsensusEngine) publishProof(
"publishing frame and aggregations",
zap.Uint64("frame_number", frame.FrameNumber),
)
head, err := e.dataTimeReel.Head()
timestamp := time.Now().UnixMilli()
msg := binary.BigEndian.AppendUint64([]byte{}, frame.FrameNumber)
msg = append(msg, config.GetVersion()...)
msg = binary.BigEndian.AppendUint64(msg, uint64(timestamp))
sig, err := e.pubSub.SignMessage(msg)
if err != nil {
panic(err)
}
peers, max, err := e.GetMostAheadPeer(head.FrameNumber)
if err != nil || len(peers) == 0 || head.FrameNumber > max {
timestamp := time.Now().UnixMilli()
msg := binary.BigEndian.AppendUint64([]byte{}, frame.FrameNumber)
msg = append(msg, config.GetVersion()...)
msg = binary.BigEndian.AppendUint64(msg, uint64(timestamp))
sig, err := e.pubSub.SignMessage(msg)
if err != nil {
panic(err)
}
e.peerMapMx.Lock()
e.peerMap[string(e.pubSub.GetPeerID())] = &peerInfo{
peerId: e.pubSub.GetPeerID(),
multiaddr: "",
maxFrame: frame.FrameNumber,
version: config.GetVersion(),
signature: sig,
publicKey: e.pubSub.GetPublicKey(),
timestamp: timestamp,
totalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
}
list := &protobufs.DataPeerListAnnounce{
PeerList: []*protobufs.DataPeer{},
}
list.PeerList = append(list.PeerList, &protobufs.DataPeer{
PeerId: e.pubSub.GetPeerID(),
Multiaddr: "",
MaxFrame: frame.FrameNumber,
Version: config.GetVersion(),
Signature: sig,
PublicKey: e.pubSub.GetPublicKey(),
Timestamp: timestamp,
TotalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
})
e.peerMapMx.Unlock()
if err := e.publishMessage(e.filter, list); err != nil {
e.logger.Debug("error publishing message", zap.Error(err))
}
e.peerMapMx.Lock()
e.peerMap[string(e.pubSub.GetPeerID())] = &peerInfo{
peerId: e.pubSub.GetPeerID(),
multiaddr: "",
maxFrame: frame.FrameNumber,
version: config.GetVersion(),
signature: sig,
publicKey: e.pubSub.GetPublicKey(),
timestamp: timestamp,
totalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
}
list := &protobufs.DataPeerListAnnounce{
PeerList: []*protobufs.DataPeer{},
}
list.PeerList = append(list.PeerList, &protobufs.DataPeer{
PeerId: e.pubSub.GetPeerID(),
Multiaddr: "",
MaxFrame: frame.FrameNumber,
Version: config.GetVersion(),
Signature: sig,
PublicKey: e.pubSub.GetPublicKey(),
Timestamp: timestamp,
TotalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
})
e.peerMapMx.Unlock()
if err := e.publishMessage(e.filter, list); err != nil {
e.logger.Debug("error publishing message", zap.Error(err))
}
e.publishMessage(e.filter, frame)
return nil
}
// insertMessage serializes a proto message into the engine's pubsub envelope
// and enqueues it on the local message processor channel, bypassing network
// publication. The payload is wrapped in an anypb.Any (with its type URL
// rewritten to the quilibrium namespace), Poseidon-hashed for the message
// hash, and handed off asynchronously so the caller never blocks.
func (e *DataClockConsensusEngine) insertMessage(
	filter []byte,
	message proto.Message,
) error {
	any := &anypb.Any{}
	if err := any.MarshalFrom(message); err != nil {
		return errors.Wrap(err, "publish message")
	}
	// Rewrite the default googleapis type URL prefix to the project's own
	// namespace so the type resolves consistently across the network.
	any.TypeUrl = strings.Replace(
		any.TypeUrl,
		"type.googleapis.com",
		"types.quilibrium.com",
		1,
	)
	payload, err := proto.Marshal(any)
	if err != nil {
		return errors.Wrap(err, "publish message")
	}
	// The Poseidon hash of the serialized Any acts as the message identifier.
	h, err := poseidon.HashBytes(payload)
	if err != nil {
		return errors.Wrap(err, "publish message")
	}
	msg := &protobufs.Message{
		Hash:    h.Bytes(),
		Address: e.provingKeyAddress,
		Payload: payload,
	}
	data, err := proto.Marshal(msg)
	if err != nil {
		return errors.Wrap(err, "publish message")
	}
	m := &pb.Message{
		Data:    data,
		Bitmask: filter,
		From:    e.pubSub.GetPeerID(),
		Seqno:   nil,
	}
	// Asynchronous hand-off. NOTE(review): if messageProcessorCh is never
	// drained this goroutine leaks — confirm the processor loop always runs.
	go func() {
		e.messageProcessorCh <- m
	}()
	return nil
}

View File

@ -17,6 +17,44 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// collect advances the local clock by syncing from the most-ahead peer.
// Starting at enqueuedFrame, it polls GetMostAheadPeer and syncs forward
// (at most 100 frames per round) until either a sync round succeeds or no
// peer reports a higher frame number, then returns the leading frame.
func (e *DataClockConsensusEngine) collect(
	enqueuedFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	e.logger.Info("collecting vdf proofs")
	latest := enqueuedFrame
	for {
		peerId, maxFrame, err := e.GetMostAheadPeer(latest.FrameNumber)
		if maxFrame > latest.FrameNumber {
			e.syncingStatus = SyncStatusSynchronizing
			if err != nil {
				// A peer claims a higher frame but lookup failed; back off
				// and retry rather than aborting collection.
				e.logger.Info("no peers available for sync, waiting")
				time.Sleep(5 * time.Second)
			} else if maxFrame > latest.FrameNumber {
				// Bound each sync round to 100 frames to keep requests small.
				if maxFrame-latest.FrameNumber > 100 {
					maxFrame = latest.FrameNumber + 100
				}
				latest, err = e.sync(latest, maxFrame, peerId)
				if err == nil {
					// One successful round is enough; stop polling.
					break
				}
			}
		} else {
			// No peer is ahead of us; we are caught up.
			break
		}
	}
	e.syncingStatus = SyncStatusNotSyncing
	e.logger.Info(
		"returning leader frame",
		zap.Uint64("frame_number", latest.FrameNumber),
	)
	return latest, nil
}
func (e *DataClockConsensusEngine) prove(
previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
@ -169,13 +207,17 @@ func (e *DataClockConsensusEngine) GetMostAheadPeer(
uint64,
error,
) {
e.logger.Info(
e.logger.Debug(
"checking peer list",
zap.Int("peers", len(e.peerMap)),
zap.Int("uncooperative_peers", len(e.uncooperativePeersMap)),
zap.Uint64("current_head_frame", frameNumber),
)
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
return e.pubSub.GetPeerID(), frameNumber, nil
}
max := frameNumber
var peer []byte = nil
e.peerMapMx.RLock()
@ -211,7 +253,7 @@ func (e *DataClockConsensusEngine) sync(
) (*protobufs.ClockFrame, error) {
latest := currentLatest
e.logger.Info("polling peer for new frames", zap.Binary("peer_id", peerId))
cc, err := e.pubSub.GetDirectChannel(peerId, "")
cc, err := e.pubSub.GetDirectChannel(peerId, "sync")
if err != nil {
e.logger.Debug(
"could not establish direct channel",
@ -265,6 +307,7 @@ func (e *DataClockConsensusEngine) sync(
if response.ClockFrame == nil ||
response.ClockFrame.FrameNumber != latest.FrameNumber+1 ||
response.ClockFrame.Timestamp < latest.Timestamp {
e.logger.Debug("received invalid response from peer")
e.peerMapMx.Lock()
@ -279,12 +322,10 @@ func (e *DataClockConsensusEngine) sync(
}
return latest, nil
}
e.logger.Info(
"received new leading frame",
zap.Uint64("frame_number", response.ClockFrame.FrameNumber),
)
if !e.IsInProverTrie(
response.ClockFrame.GetPublicKeySignatureEd448().PublicKey.KeyValue,
) {
@ -296,62 +337,19 @@ func (e *DataClockConsensusEngine) sync(
}
e.peerMapMx.Unlock()
}
if err := e.frameProver.VerifyDataClockFrame(
response.ClockFrame,
); err != nil {
return nil, errors.Wrap(err, "sync")
}
e.dataTimeReel.Insert(response.ClockFrame, true)
latest = response.ClockFrame
if latest.FrameNumber >= maxFrame {
break
}
}
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
return latest, nil
}
func (e *DataClockConsensusEngine) collect(
enqueuedFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
e.logger.Info("collecting vdf proofs")
latest := enqueuedFrame
for {
peerId, maxFrame, err := e.GetMostAheadPeer(latest.FrameNumber)
if maxFrame > latest.FrameNumber {
e.syncingStatus = SyncStatusSynchronizing
if err != nil {
e.logger.Info("no peers available for sync, waiting")
time.Sleep(5 * time.Second)
} else if maxFrame > latest.FrameNumber {
if maxFrame-latest.FrameNumber > 100 {
maxFrame = latest.FrameNumber + 100
}
latest, err = e.sync(latest, maxFrame, peerId)
if err == nil {
break
}
}
} else {
break
}
}
e.syncingStatus = SyncStatusNotSyncing
e.logger.Info(
"returning leader frame",
zap.Uint64("frame_number", latest.FrameNumber),
)
return latest, nil
}

View File

@ -61,7 +61,7 @@ type ChannelServer = protobufs.DataService_GetPublicChannelServer
type DataClockConsensusEngine struct {
protobufs.UnimplementedDataServiceServer
difficulty uint32
engineConfig *config.EngineConfig
config *config.Config
logger *zap.Logger
state consensus.EngineState
clockStore store.ClockStore
@ -93,24 +93,24 @@ type DataClockConsensusEngine struct {
currentReceivingSyncPeersMx sync.Mutex
currentReceivingSyncPeers int
frameChan chan *protobufs.ClockFrame
executionEngines map[string]execution.ExecutionEngine
filter []byte
input []byte
parentSelector []byte
syncingStatus SyncStatusType
syncingTarget []byte
previousHead *protobufs.ClockFrame
engineMx sync.Mutex
dependencyMapMx sync.Mutex
stagedTransactions *protobufs.TokenRequests
stagedTransactionsMx sync.Mutex
peerMapMx sync.RWMutex
peerAnnounceMapMx sync.Mutex
proverTrieJoinRequests map[string]string
proverTrieLeaveRequests map[string]string
proverTriePauseRequests map[string]string
proverTrieResumeRequests map[string]string
frameChan chan *protobufs.ClockFrame
executionEngines map[string]execution.ExecutionEngine
filter []byte
input []byte
parentSelector []byte
syncingStatus SyncStatusType
syncingTarget []byte
previousHead *protobufs.ClockFrame
engineMx sync.Mutex
dependencyMapMx sync.Mutex
stagedTransactions *protobufs.TokenRequests
stagedTransactionsMx sync.Mutex
peerMapMx sync.RWMutex
peerAnnounceMapMx sync.Mutex
// proverTrieJoinRequests map[string]string
// proverTrieLeaveRequests map[string]string
// proverTriePauseRequests map[string]string
// proverTrieResumeRequests map[string]string
proverTrieRequestsMx sync.Mutex
lastKeyBundleAnnouncementFrame uint64
peerSeniority *peerSeniority
@ -145,7 +145,7 @@ func (p peerSeniorityItem) Priority() *big.Int {
var _ consensus.DataConsensusEngine = (*DataClockConsensusEngine)(nil)
func NewDataClockConsensusEngine(
engineConfig *config.EngineConfig,
config *config.Config,
logger *zap.Logger,
keyManager keys.KeyManager,
clockStore store.ClockStore,
@ -167,7 +167,7 @@ func NewDataClockConsensusEngine(
panic(errors.New("logger is nil"))
}
if engineConfig == nil {
if config == nil {
panic(errors.New("engine config is nil"))
}
@ -215,12 +215,12 @@ func NewDataClockConsensusEngine(
panic(errors.New("peer info manager is nil"))
}
minimumPeersRequired := engineConfig.MinimumPeersRequired
minimumPeersRequired := config.Engine.MinimumPeersRequired
if minimumPeersRequired == 0 {
minimumPeersRequired = 3
}
difficulty := engineConfig.Difficulty
difficulty := config.Engine.Difficulty
if difficulty == 0 {
difficulty = 160000
}
@ -259,14 +259,14 @@ func NewDataClockConsensusEngine(
peerInfoManager: peerInfoManager,
peerSeniority: newFromMap(peerSeniority),
messageProcessorCh: make(chan *pb.Message),
engineConfig: engineConfig,
config: config,
preMidnightMint: map[string]struct{}{},
}
logger.Info("constructing consensus engine")
signer, keyType, bytes, address := e.GetProvingKey(
engineConfig,
config.Engine,
)
e.filter = filter
@ -302,17 +302,15 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
e.logger.Info("subscribing to pubsub messages")
e.pubSub.Subscribe(e.filter, e.handleMessage)
go func() {
server := grpc.NewServer(
grpc.MaxSendMsgSize(600*1024*1024),
grpc.MaxRecvMsgSize(600*1024*1024),
)
protobufs.RegisterDataServiceServer(server, e)
if err := e.pubSub.StartDirectChannelListener(
e.pubSub.GetPeerID(),
"",
"sync",
server,
); err != nil {
panic(err)
@ -322,8 +320,8 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
go func() {
if e.dataTimeReel.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
server := grpc.NewServer(
grpc.MaxSendMsgSize(600*1024*1024),
grpc.MaxRecvMsgSize(600*1024*1024),
grpc.MaxSendMsgSize(1*1024*1024),
grpc.MaxRecvMsgSize(1*1024*1024),
)
protobufs.RegisterDataServiceServer(server, e)
@ -351,9 +349,9 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
panic(err)
}
if frame.FrameNumber >= nextFrame.FrameNumber ||
if frame.FrameNumber-100 >= nextFrame.FrameNumber ||
nextFrame.FrameNumber == 0 {
time.Sleep(30 * time.Second)
time.Sleep(60 * time.Second)
continue
}
@ -436,6 +434,16 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
}()
go e.runLoop()
go e.rebroadcastLoop()
go func() {
time.Sleep(30 * time.Second)
e.logger.Info("checking for snapshots to play forward")
if err := e.downloadSnapshot(e.config.DB.Path, e.config.P2P.Network); err != nil {
e.logger.Error("error downloading snapshot", zap.Error(err))
} else if err := e.applySnapshot(e.config.DB.Path); err != nil {
e.logger.Error("error replaying snapshot", zap.Error(err))
}
}()
go func() {
errChan <- nil
@ -458,7 +466,7 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
}
var clients []protobufs.DataIPCServiceClient
if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
if len(e.config.Engine.DataWorkerMultiaddrs) != 0 {
clients, err = e.createParallelDataClientsFromList()
if err != nil {
panic(err)
@ -510,7 +518,7 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
for j := 3; j >= 0; j-- {
var err error
if client == nil {
if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
if len(e.config.Engine.DataWorkerMultiaddrs) != 0 {
e.logger.Error(
"client failed, reconnecting after 50ms",
zap.Uint32("client", uint32(i)),
@ -520,7 +528,7 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
if err != nil {
e.logger.Error("failed to reconnect", zap.Error(err))
}
} else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
} else if len(e.config.Engine.DataWorkerMultiaddrs) == 0 {
e.logger.Error(
"client failed, reconnecting after 50ms",
)
@ -549,7 +557,7 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
if j == 0 {
e.logger.Error("unable to get a response in time from worker", zap.Error(err))
}
if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
if len(e.config.Engine.DataWorkerMultiaddrs) != 0 {
e.logger.Error(
"client failed, reconnecting after 50ms",
zap.Uint32("client", uint32(i)),
@ -559,7 +567,7 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
if err != nil {
e.logger.Error("failed to reconnect", zap.Error(err))
}
} else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
} else if len(e.config.Engine.DataWorkerMultiaddrs) == 0 {
e.logger.Error(
"client failed, reconnecting after 50ms",
)
@ -607,24 +615,24 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error {
e.state = consensus.EngineStateStopping
errChan := make(chan error)
msg := []byte("pause")
msg = binary.BigEndian.AppendUint64(msg, e.GetFrame().FrameNumber)
msg = append(msg, e.filter...)
sig, err := e.pubSub.SignMessage(msg)
if err != nil {
panic(err)
}
// msg := []byte("pause")
// msg = binary.BigEndian.AppendUint64(msg, e.GetFrame().FrameNumber)
// msg = append(msg, e.filter...)
// sig, err := e.pubSub.SignMessage(msg)
// if err != nil {
// panic(err)
// }
e.publishMessage(e.filter, &protobufs.AnnounceProverPause{
Filter: e.filter,
FrameNumber: e.GetFrame().FrameNumber,
PublicKeySignatureEd448: &protobufs.Ed448Signature{
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.pubSub.GetPublicKey(),
},
Signature: sig,
},
})
// e.publishMessage(e.filter, &protobufs.AnnounceProverPause{
// Filter: e.filter,
// FrameNumber: e.GetFrame().FrameNumber,
// PublicKeySignatureEd448: &protobufs.Ed448Signature{
// PublicKey: &protobufs.Ed448PublicKey{
// KeyValue: e.pubSub.GetPublicKey(),
// },
// Signature: sig,
// },
// })
wg := sync.WaitGroup{}
wg.Add(len(e.executionEngines))
@ -666,7 +674,7 @@ func (e *DataClockConsensusEngine) GetDifficulty() uint32 {
func (e *DataClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
frame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
return nil
}
return frame
@ -752,7 +760,7 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromListAndIndex(
protobufs.DataIPCServiceClient,
error,
) {
ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index])
ma, err := multiaddr.NewMultiaddr(e.config.Engine.DataWorkerMultiaddrs[index])
if err != nil {
return nil, errors.Wrap(err, "create parallel data client")
}
@ -798,18 +806,18 @@ func (
zap.Uint32("client", index),
)
if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
if e.config.Engine.DataWorkerBaseListenMultiaddr == "" {
e.config.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if e.engineConfig.DataWorkerBaseListenPort == 0 {
e.engineConfig.DataWorkerBaseListenPort = 40000
if e.config.Engine.DataWorkerBaseListenPort == 0 {
e.config.Engine.DataWorkerBaseListenPort = 40000
}
ma, err := multiaddr.NewMultiaddr(
fmt.Sprintf(
e.engineConfig.DataWorkerBaseListenMultiaddr,
int(e.engineConfig.DataWorkerBaseListenPort)+int(index),
e.config.Engine.DataWorkerBaseListenMultiaddr,
int(e.config.Engine.DataWorkerBaseListenPort)+int(index),
),
)
if err != nil {
@ -848,7 +856,7 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() (
[]protobufs.DataIPCServiceClient,
error,
) {
parallelism := len(e.engineConfig.DataWorkerMultiaddrs)
parallelism := len(e.config.Engine.DataWorkerMultiaddrs)
e.logger.Info(
"connecting to data worker processes",
@ -858,7 +866,7 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() (
clients := make([]protobufs.DataIPCServiceClient, parallelism)
for i := 0; i < parallelism; i++ {
ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[i])
ma, err := multiaddr.NewMultiaddr(e.config.Engine.DataWorkerMultiaddrs[i])
if err != nil {
panic(err)
}
@ -902,12 +910,12 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
zap.Int("parallelism", parallelism),
)
if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
if e.config.Engine.DataWorkerBaseListenMultiaddr == "" {
e.config.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if e.engineConfig.DataWorkerBaseListenPort == 0 {
e.engineConfig.DataWorkerBaseListenPort = 40000
if e.config.Engine.DataWorkerBaseListenPort == 0 {
e.config.Engine.DataWorkerBaseListenPort = 40000
}
clients := make([]protobufs.DataIPCServiceClient, parallelism)
@ -915,8 +923,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
for i := 0; i < parallelism; i++ {
ma, err := multiaddr.NewMultiaddr(
fmt.Sprintf(
e.engineConfig.DataWorkerBaseListenMultiaddr,
int(e.engineConfig.DataWorkerBaseListenPort)+i,
e.config.Engine.DataWorkerBaseListenMultiaddr,
int(e.config.Engine.DataWorkerBaseListenPort)+i,
),
)
if err != nil {

View File

@ -0,0 +1,272 @@
package data
import (
"archive/zip"
"bufio"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
func (e *DataClockConsensusEngine) downloadSnapshot(
dbPath string,
network uint8,
) error {
frame, _, err := e.clockStore.GetLatestDataClockFrame(e.filter)
if err != nil {
return errors.Wrap(err, "download snapshot")
}
if frame.Timestamp > time.Now().Add(-6*time.Hour).UnixMilli() {
return errors.Wrap(
errors.New("synced higher than recent snapshot"),
"download snapshot",
)
}
resp, err := http.Get(
fmt.Sprintf(
"https://frame-snapshots.quilibrium.com/%d/latest-backup",
network,
),
)
if err != nil {
return errors.Wrap(err, "download snapshot")
}
defer resp.Body.Close()
scanner := bufio.NewScanner(resp.Body)
if !scanner.Scan() {
return errors.Wrap(
errors.New("metadata file is empty"),
"download snapshot",
)
}
zipURL := strings.TrimSpace(scanner.Text())
if !scanner.Scan() {
return errors.Wrap(
errors.New("metadata file missing hash"),
"download snapshot",
)
}
expectedHash := strings.TrimSpace(scanner.Text())
resp, err = http.Get(
fmt.Sprintf(
"https://frame-snapshots.quilibrium.com/%d/%s",
network,
zipURL,
),
)
if err != nil {
return errors.Wrap(err, "download snapshot")
}
defer resp.Body.Close()
err = os.MkdirAll(
path.Join(dbPath, "snapshot"),
0755,
)
if err != nil {
return errors.Wrap(
fmt.Errorf("failed to create extraction directory: %w", err),
"download snapshot",
)
}
tempFile, err := os.CreateTemp(
path.Join(dbPath, "snapshot"),
"snapshot.zip",
)
if err != nil {
return errors.Wrap(err, "download snapshot")
}
defer os.Remove(tempFile.Name())
defer tempFile.Close()
hasher := sha256.New()
writer := io.MultiWriter(tempFile, hasher)
_, err = io.Copy(writer, resp.Body)
if err != nil {
return errors.Wrap(err, "download snapshot")
}
actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash {
return errors.Wrap(
fmt.Errorf(
"hash mismatch: expected %s, got %s",
expectedHash,
actualHash,
),
"download snapshot",
)
}
zipReader, err := zip.OpenReader(tempFile.Name())
if err != nil {
return fmt.Errorf("failed to open zip file: %w", err)
}
defer zipReader.Close()
for _, file := range zipReader.File {
destPath := filepath.Join(
path.Join(dbPath, "snapshot"),
file.Name,
)
if !strings.HasPrefix(
destPath,
filepath.Clean(path.Join(dbPath, "snapshot"))+string(os.PathSeparator),
) {
return errors.Wrap(
fmt.Errorf("invalid file path in zip: %s", file.Name),
"download snapshot",
)
}
if file.FileInfo().IsDir() {
os.MkdirAll(destPath, file.Mode())
continue
}
err := os.MkdirAll(filepath.Dir(destPath), 0755)
if err != nil {
return errors.Wrap(
fmt.Errorf(
"failed to create directory for file %s: %w",
file.Name,
err,
),
"download snapshot",
)
}
destFile, err := os.OpenFile(
destPath,
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode(),
)
if err != nil {
return errors.Wrap(
fmt.Errorf("failed to create destination file %s: %w", file.Name, err),
"download snapshot",
)
}
srcFile, err := file.Open()
if err != nil {
destFile.Close()
return errors.Wrap(
fmt.Errorf("failed to open file in zip %s: %w", file.Name, err),
"download snapshot",
)
}
_, err = io.Copy(destFile, srcFile)
srcFile.Close()
destFile.Close()
if err != nil {
return errors.Wrap(
fmt.Errorf("failed to extract file %s: %w", file.Name, err),
"download snapshot",
)
}
}
return nil
}
func (e *DataClockConsensusEngine) applySnapshot(
dbPath string,
) error {
dirEntries, err := os.ReadDir(
path.Join(dbPath, "snapshot"),
)
if err != nil {
return errors.Wrap(
err,
"apply snapshot",
)
}
defer os.RemoveAll(path.Join(dbPath, "snapshot"))
snapshotDBPath := ""
for _, entry := range dirEntries {
if entry.IsDir() && strings.HasPrefix(entry.Name(), "exporter") {
snapshotDBPath = path.Join(path.Join(dbPath, "snapshot"), entry.Name())
}
}
if snapshotDBPath == "" {
return nil
}
temporaryStore := store.NewPebbleDB(&config.DBConfig{
Path: snapshotDBPath,
})
temporaryClockStore := store.NewPebbleClockStore(temporaryStore, e.logger)
max, _, err := e.clockStore.GetLatestDataClockFrame(e.filter)
if err != nil {
temporaryStore.Close()
return errors.Wrap(
err,
"apply snapshot",
)
}
key := []byte{store.CLOCK_FRAME, store.CLOCK_DATA_FRAME_DATA}
key = binary.BigEndian.AppendUint64(key, 0)
key = append(key, e.filter...)
_, _, err = temporaryClockStore.GetDataClockFrame(
e.filter,
max.FrameNumber+1,
false,
)
if err != nil {
fmt.Println("not found", max.FrameNumber+1)
temporaryStore.Close()
return errors.Wrap(
err,
"apply snapshot",
)
}
for i := max.FrameNumber + 1; true; i++ {
frame, _, err := temporaryClockStore.GetDataClockFrame(
e.filter,
i,
false,
)
if err != nil {
break
}
if err := e.handleClockFrame([]byte{}, []byte{}, frame); err != nil {
temporaryStore.Close()
return errors.Wrap(
err,
"apply snapshot",
)
}
}
temporaryStore.Close()
e.logger.Info("imported snapshot")
return nil
}

View File

@ -2,7 +2,7 @@ package data
import (
"bytes"
"slices"
"crypto/rand"
"time"
"go.uber.org/zap"
@ -57,13 +57,14 @@ func (e *DataClockConsensusEngine) runLoop() {
select {
case dataFrame := <-dataFrameCh:
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
}
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
e.logger.Info(
"current frame head",
zap.Uint64("frame_number", dataFrame.FrameNumber),
)
if !e.IsInProverTrie(e.provingKeyBytes) {
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
}
}
if latestFrame != nil &&
@ -91,72 +92,72 @@ func (e *DataClockConsensusEngine) runLoop() {
continue
}
e.proverTrieRequestsMx.Lock()
joinAddrs := tries.NewMinHeap[peerSeniorityItem]()
leaveAddrs := tries.NewMinHeap[peerSeniorityItem]()
for _, addr := range e.proverTrieJoinRequests {
if _, ok := (*e.peerSeniority)[addr]; !ok {
joinAddrs.Push(peerSeniorityItem{
addr: addr,
seniority: 0,
})
} else {
joinAddrs.Push((*e.peerSeniority)[addr])
}
}
for _, addr := range e.proverTrieLeaveRequests {
if _, ok := (*e.peerSeniority)[addr]; !ok {
leaveAddrs.Push(peerSeniorityItem{
addr: addr,
seniority: 0,
})
} else {
leaveAddrs.Push((*e.peerSeniority)[addr])
}
}
for _, addr := range e.proverTrieResumeRequests {
if _, ok := e.proverTriePauseRequests[addr]; ok {
delete(e.proverTriePauseRequests, addr)
}
}
// e.proverTrieRequestsMx.Lock()
// joinAddrs := tries.NewMinHeap[peerSeniorityItem]()
// leaveAddrs := tries.NewMinHeap[peerSeniorityItem]()
// for _, addr := range e.proverTrieJoinRequests {
// if _, ok := (*e.peerSeniority)[addr]; !ok {
// joinAddrs.Push(peerSeniorityItem{
// addr: addr,
// seniority: 0,
// })
// } else {
// joinAddrs.Push((*e.peerSeniority)[addr])
// }
// }
// for _, addr := range e.proverTrieLeaveRequests {
// if _, ok := (*e.peerSeniority)[addr]; !ok {
// leaveAddrs.Push(peerSeniorityItem{
// addr: addr,
// seniority: 0,
// })
// } else {
// leaveAddrs.Push((*e.peerSeniority)[addr])
// }
// }
// for _, addr := range e.proverTrieResumeRequests {
// if _, ok := e.proverTriePauseRequests[addr]; ok {
// delete(e.proverTriePauseRequests, addr)
// }
// }
joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
copy(joinReqs, joinAddrs.All())
slices.Reverse(joinReqs)
leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
copy(leaveReqs, leaveAddrs.All())
slices.Reverse(leaveReqs)
// joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
// copy(joinReqs, joinAddrs.All())
// slices.Reverse(joinReqs)
// leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
// copy(leaveReqs, leaveAddrs.All())
// slices.Reverse(leaveReqs)
e.proverTrieJoinRequests = make(map[string]string)
e.proverTrieLeaveRequests = make(map[string]string)
e.proverTrieRequestsMx.Unlock()
// e.proverTrieJoinRequests = make(map[string]string)
// e.proverTrieLeaveRequests = make(map[string]string)
// e.proverTrieRequestsMx.Unlock()
e.frameProverTriesMx.Lock()
for _, addr := range joinReqs {
rings := len(e.frameProverTries)
last := e.frameProverTries[rings-1]
set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
if len(set) == 1024 {
e.frameProverTries = append(
e.frameProverTries,
&tries.RollingFrecencyCritbitTrie{},
)
last = e.frameProverTries[rings]
}
last.Add([]byte(addr.addr), nextFrame.FrameNumber)
}
for _, addr := range leaveReqs {
for _, t := range e.frameProverTries {
if bytes.Equal(
t.FindNearest([]byte(addr.addr)).External.Key,
[]byte(addr.addr),
) {
t.Remove([]byte(addr.addr))
break
}
}
}
e.frameProverTriesMx.Unlock()
// e.frameProverTriesMx.Lock()
// for _, addr := range joinReqs {
// rings := len(e.frameProverTries)
// last := e.frameProverTries[rings-1]
// set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
// if len(set) == 1024 {
// e.frameProverTries = append(
// e.frameProverTries,
// &tries.RollingFrecencyCritbitTrie{},
// )
// last = e.frameProverTries[rings]
// }
// last.Add([]byte(addr.addr), nextFrame.FrameNumber)
// }
// for _, addr := range leaveReqs {
// for _, t := range e.frameProverTries {
// if bytes.Equal(
// t.FindNearest([]byte(addr.addr)).External.Key,
// []byte(addr.addr),
// ) {
// t.Remove([]byte(addr.addr))
// break
// }
// }
// }
// e.frameProverTriesMx.Unlock()
e.dataTimeReel.Insert(nextFrame, true)
@ -172,9 +173,15 @@ func (e *DataClockConsensusEngine) runLoop() {
panic(err)
}
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
continue
e.logger.Info(
"current frame head",
zap.Uint64("frame_number", dataFrame.FrameNumber),
)
if !e.IsInProverTrie(e.provingKeyBytes) {
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
}
}
if latestFrame == nil ||
@ -201,72 +208,72 @@ func (e *DataClockConsensusEngine) runLoop() {
continue
}
e.proverTrieRequestsMx.Lock()
joinAddrs := tries.NewMinHeap[peerSeniorityItem]()
leaveAddrs := tries.NewMinHeap[peerSeniorityItem]()
for _, addr := range e.proverTrieJoinRequests {
if _, ok := (*e.peerSeniority)[addr]; !ok {
joinAddrs.Push(peerSeniorityItem{
addr: addr,
seniority: 0,
})
} else {
joinAddrs.Push((*e.peerSeniority)[addr])
}
}
for _, addr := range e.proverTrieLeaveRequests {
if _, ok := (*e.peerSeniority)[addr]; !ok {
leaveAddrs.Push(peerSeniorityItem{
addr: addr,
seniority: 0,
})
} else {
leaveAddrs.Push((*e.peerSeniority)[addr])
}
}
for _, addr := range e.proverTrieResumeRequests {
if _, ok := e.proverTriePauseRequests[addr]; ok {
delete(e.proverTriePauseRequests, addr)
}
}
// e.proverTrieRequestsMx.Lock()
// joinAddrs := tries.NewMinHeap[peerSeniorityItem]()
// leaveAddrs := tries.NewMinHeap[peerSeniorityItem]()
// for _, addr := range e.proverTrieJoinRequests {
// if _, ok := (*e.peerSeniority)[addr]; !ok {
// joinAddrs.Push(peerSeniorityItem{
// addr: addr,
// seniority: 0,
// })
// } else {
// joinAddrs.Push((*e.peerSeniority)[addr])
// }
// }
// for _, addr := range e.proverTrieLeaveRequests {
// if _, ok := (*e.peerSeniority)[addr]; !ok {
// leaveAddrs.Push(peerSeniorityItem{
// addr: addr,
// seniority: 0,
// })
// } else {
// leaveAddrs.Push((*e.peerSeniority)[addr])
// }
// }
// for _, addr := range e.proverTrieResumeRequests {
// if _, ok := e.proverTriePauseRequests[addr]; ok {
// delete(e.proverTriePauseRequests, addr)
// }
// }
joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
copy(joinReqs, joinAddrs.All())
slices.Reverse(joinReqs)
leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
copy(leaveReqs, leaveAddrs.All())
slices.Reverse(leaveReqs)
// joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
// copy(joinReqs, joinAddrs.All())
// slices.Reverse(joinReqs)
// leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
// copy(leaveReqs, leaveAddrs.All())
// slices.Reverse(leaveReqs)
e.proverTrieJoinRequests = make(map[string]string)
e.proverTrieLeaveRequests = make(map[string]string)
e.proverTrieRequestsMx.Unlock()
// e.proverTrieJoinRequests = make(map[string]string)
// e.proverTrieLeaveRequests = make(map[string]string)
// e.proverTrieRequestsMx.Unlock()
e.frameProverTriesMx.Lock()
for _, addr := range joinReqs {
rings := len(e.frameProverTries)
last := e.frameProverTries[rings-1]
set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
if len(set) == 8 {
e.frameProverTries = append(
e.frameProverTries,
&tries.RollingFrecencyCritbitTrie{},
)
last = e.frameProverTries[rings]
}
last.Add([]byte(addr.addr), nextFrame.FrameNumber)
}
for _, addr := range leaveReqs {
for _, t := range e.frameProverTries {
if bytes.Equal(
t.FindNearest([]byte(addr.addr)).External.Key,
[]byte(addr.addr),
) {
t.Remove([]byte(addr.addr))
break
}
}
}
e.frameProverTriesMx.Unlock()
// e.frameProverTriesMx.Lock()
// for _, addr := range joinReqs {
// rings := len(e.frameProverTries)
// last := e.frameProverTries[rings-1]
// set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
// if len(set) == 8 {
// e.frameProverTries = append(
// e.frameProverTries,
// &tries.RollingFrecencyCritbitTrie{},
// )
// last = e.frameProverTries[rings]
// }
// last.Add([]byte(addr.addr), nextFrame.FrameNumber)
// }
// for _, addr := range leaveReqs {
// for _, t := range e.frameProverTries {
// if bytes.Equal(
// t.FindNearest([]byte(addr.addr)).External.Key,
// []byte(addr.addr),
// ) {
// t.Remove([]byte(addr.addr))
// break
// }
// }
// }
// e.frameProverTriesMx.Unlock()
e.dataTimeReel.Insert(nextFrame, true)
@ -281,3 +288,74 @@ func (e *DataClockConsensusEngine) runLoop() {
}
}
}
func (e *DataClockConsensusEngine) rebroadcastLoop() {
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
time.Sleep(120 * time.Second)
for {
_, err := e.dataTimeReel.Head()
if err != nil {
e.logger.Info("no frames to rebroadcast yet, waiting...")
time.Sleep(10 * time.Second)
continue
}
max, _, err := e.clockStore.GetLatestDataClockFrame(e.filter)
frames := []*protobufs.ClockFrame{}
sent := false
for i := uint64(1); i < max.FrameNumber; i++ {
if e.state == consensus.EngineStateStopped ||
e.state == consensus.EngineStateStopping {
e.logger.Info("shutting down rebroadcaster")
return
}
frame, _, err := e.clockStore.GetDataClockFrame(e.filter, i, false)
if err != nil {
frames = []*protobufs.ClockFrame{}
e.logger.Error("error while iterating", zap.Error(err))
break
}
if frame == nil {
frames = []*protobufs.ClockFrame{}
e.logger.Error("too far ahead", zap.Error(err))
break
}
frames = append(frames, frame)
if i%50 == 0 {
e.logger.Info(
"rebroadcasting frames",
zap.Uint64("from", frames[0].FrameNumber),
zap.Uint64("to", frames[len(frames)-1].FrameNumber),
)
e.publishMessage(e.filter, &protobufs.FrameRebroadcast{
From: frames[0].FrameNumber,
To: frames[len(frames)-1].FrameNumber,
ClockFrames: frames,
})
time.Sleep(60 * time.Second)
sent = true
frames = []*protobufs.ClockFrame{}
}
}
if !sent && len(frames) != 0 {
e.logger.Info(
"rebroadcasting frames",
zap.Uint64("from", frames[0].FrameNumber),
zap.Uint64("to", frames[len(frames)-1].FrameNumber),
)
b := make([]byte, 24)
rand.Read(b)
e.publishMessage(e.filter, &protobufs.FrameRebroadcast{
From: frames[0].FrameNumber,
To: frames[len(frames)-1].FrameNumber,
ClockFrames: frames,
Random: b,
})
time.Sleep(60 * time.Second)
}
}
}
}

View File

@ -25,15 +25,11 @@ func (e *DataClockConsensusEngine) runMessageHandler() {
msg := &protobufs.Message{}
if err := proto.Unmarshal(message.Data, msg); err != nil {
e.logger.Debug("bad message")
continue
}
e.peerMapMx.RLock()
peer, ok := e.peerMap[string(message.From)]
e.peerMapMx.RUnlock()
if ok && bytes.Compare(peer.version, config.GetMinimumVersion()) >= 0 &&
e.frameProverTries[0].Contains(e.provingKeyAddress) &&
if e.frameProverTries[0].Contains(e.provingKeyAddress) &&
e.syncingStatus == SyncStatusNotSyncing {
for name := range e.executionEngines {
name := name
@ -89,9 +85,27 @@ func (e *DataClockConsensusEngine) runMessageHandler() {
e.logger.Error("error while unmarshaling", zap.Error(err))
continue
}
e.logger.Debug("message type", zap.String("type", any.TypeUrl))
go func() {
switch any.TypeUrl {
case protobufs.FrameRebroadcastType:
if err := e.handleRebroadcast(
message.From,
msg.Address,
any,
); err != nil {
return
}
case protobufs.ClockFrameType:
if err := e.handleClockFrameData(
message.From,
msg.Address,
any,
false,
); err != nil {
return
}
case protobufs.DataPeerListAnnounceType:
if err := e.handleDataPeerListAnnounce(
message.From,
@ -100,47 +114,177 @@ func (e *DataClockConsensusEngine) runMessageHandler() {
); err != nil {
return
}
case protobufs.AnnounceProverJoinType:
if err := e.handleDataAnnounceProverJoin(
message.From,
msg.Address,
any,
); err != nil {
return
}
case protobufs.AnnounceProverLeaveType:
if !e.IsInProverTrie(peer.peerId) {
return
}
if err := e.handleDataAnnounceProverLeave(
message.From,
msg.Address,
any,
); err != nil {
return
}
// case protobufs.AnnounceProverJoinType:
// if err := e.handleDataAnnounceProverJoin(
// message.From,
// msg.Address,
// any,
// ); err != nil {
// return
// }
// case protobufs.AnnounceProverLeaveType:
// if !e.IsInProverTrie(peer.peerId) {
// return
// }
// if err := e.handleDataAnnounceProverLeave(
// message.From,
// msg.Address,
// any,
// ); err != nil {
// return
// }
case protobufs.AnnounceProverPauseType:
if err := e.handleDataAnnounceProverPause(
message.From,
msg.Address,
any,
); err != nil {
return
}
// stop spamming
e.pubSub.AddPeerScore(message.From, -1000)
// if err := e.handleDataAnnounceProverPause(
// message.From,
// msg.Address,
// any,
// ); err != nil {
// return
// }
case protobufs.AnnounceProverResumeType:
if err := e.handleDataAnnounceProverResume(
message.From,
msg.Address,
any,
); err != nil {
return
}
// stop spamming
e.pubSub.AddPeerScore(message.From, -1000)
// if err := e.handleDataAnnounceProverResume(
// message.From,
// msg.Address,
// any,
// ); err != nil {
// return
// }
}
}()
}
}
}
func (e *DataClockConsensusEngine) handleRebroadcast(
peerID []byte,
address []byte,
any *anypb.Any,
) error {
if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
return nil
}
frames := &protobufs.FrameRebroadcast{}
if err := any.UnmarshalTo(frames); err != nil {
return errors.Wrap(err, "handle clock frame data")
}
head, err := e.dataTimeReel.Head()
if err != nil {
return nil
}
e.logger.Debug(
"received rebroadcast",
zap.Uint64("from", frames.From),
zap.Uint64("to", frames.To),
)
if head.FrameNumber+1 < frames.From {
return nil
}
if head.FrameNumber > frames.To {
return nil
}
for _, frame := range frames.ClockFrames {
if head.FrameNumber >= frame.FrameNumber {
continue
}
e.logger.Info("receiving synchronization data")
if err := e.handleClockFrame(peerID, address, frame); err != nil {
// if they're sending invalid clock frames, nuke them.
e.pubSub.AddPeerScore(peerID, -100000)
return errors.Wrap(err, "handle rebroadcast")
}
}
return nil
}
func (e *DataClockConsensusEngine) handleClockFrame(
peerID []byte,
address []byte,
frame *protobufs.ClockFrame,
) error {
if frame == nil {
return errors.Wrap(errors.New("frame is nil"), "handle clock frame")
}
addr, err := poseidon.HashBytes(
frame.GetPublicKeySignatureEd448().PublicKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "handle clock frame data")
}
trie := e.GetFrameProverTries()[0]
if !trie.Contains(addr.Bytes()) {
e.logger.Debug(
"prover not in trie at frame, address may be in fork",
zap.Binary("address", address),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
)
return nil
}
e.logger.Debug(
"got clock frame",
zap.Binary("address", address),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("proof_count", len(frame.AggregateProofs)),
)
if err := e.frameProver.VerifyDataClockFrame(frame); err != nil {
e.logger.Debug("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "handle clock frame data")
}
e.logger.Debug(
"clock frame was valid",
zap.Binary("address", address),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
)
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if frame.FrameNumber > head.FrameNumber {
e.dataTimeReel.Insert(frame, false)
}
return nil
}
func (e *DataClockConsensusEngine) handleClockFrameData(
peerID []byte,
address []byte,
any *anypb.Any,
isSync bool,
) error {
if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
return nil
}
frame := &protobufs.ClockFrame{}
if err := any.UnmarshalTo(frame); err != nil {
return errors.Wrap(err, "handle clock frame data")
}
return e.handleClockFrame(peerID, address, frame)
}
func (e *DataClockConsensusEngine) handleDataPeerListAnnounce(
peerID []byte,
address []byte,
@ -203,7 +347,7 @@ func (e *DataClockConsensusEngine) handleDataPeerListAnnounce(
"peer provided outdated version, penalizing app score",
zap.Binary("peer_id", p.PeerId),
)
e.pubSub.SetPeerScore(p.PeerId, -10000)
e.pubSub.SetPeerScore(p.PeerId, -1000000)
continue
}
}
@ -273,182 +417,182 @@ func (e *DataClockConsensusEngine) getAddressFromSignature(
return addrBI.FillBytes(make([]byte, 32)), nil
}
func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin(
peerID []byte,
address []byte,
any *anypb.Any,
) error {
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
announce := &protobufs.AnnounceProverJoin{}
if err := any.UnmarshalTo(announce); err != nil {
return errors.Wrap(err, "handle data announce prover join")
}
// func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin(
// peerID []byte,
// address []byte,
// any *anypb.Any,
// ) error {
// if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
// announce := &protobufs.AnnounceProverJoin{}
// if err := any.UnmarshalTo(announce); err != nil {
// return errors.Wrap(err, "handle data announce prover join")
// }
if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
return errors.Wrap(
errors.New("invalid data"),
"handle data announce prover join",
)
}
// if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
// return errors.Wrap(
// errors.New("invalid data"),
// "handle data announce prover join",
// )
// }
address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
if err != nil {
return errors.Wrap(err, "handle data announce prover join")
}
// address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
// if err != nil {
// return errors.Wrap(err, "handle data announce prover join")
// }
msg := []byte("join")
msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
msg = append(msg, announce.Filter...)
if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
return errors.Wrap(err, "handle data announce prover join")
}
// msg := []byte("join")
// msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
// msg = append(msg, announce.Filter...)
// if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
// return errors.Wrap(err, "handle data announce prover join")
// }
e.proverTrieRequestsMx.Lock()
if len(announce.Filter) != len(e.filter) {
return errors.Wrap(
errors.New("filter width mismatch"),
"handle data announce prover join",
)
}
// e.proverTrieRequestsMx.Lock()
// if len(announce.Filter) != len(e.filter) {
// return errors.Wrap(
// errors.New("filter width mismatch"),
// "handle data announce prover join",
// )
// }
e.proverTrieJoinRequests[string(address)] = string(announce.Filter)
e.proverTrieRequestsMx.Unlock()
}
return nil
}
// e.proverTrieJoinRequests[string(address)] = string(announce.Filter)
// e.proverTrieRequestsMx.Unlock()
// }
// return nil
// }
// handleDataAnnounceProverLeave stages a signed prover "leave" announcement
// so it can later be applied to the prover trie. Only nodes that are
// themselves in the active prover trie (index 0) record the request; all
// others return nil without side effects.
//
// The signature covers the literal tag "leave", the frame number, and the
// filter, which prevents replaying a signature from one announcement type as
// another. The staged request is keyed by the address derived from the
// signature, not by the caller-supplied address parameter.
func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	if !e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
		return nil
	}

	announce := &protobufs.AnnounceProverLeave{}
	if err := any.UnmarshalTo(announce); err != nil {
		return errors.Wrap(err, "handle data announce prover leave")
	}

	if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
		return errors.Wrap(
			errors.New("invalid data"),
			"handle data announce prover leave",
		)
	}

	e.proverTrieRequestsMx.Lock()
	// Deferred unlock guarantees the mutex is released on every error path;
	// the original returned while still holding the lock on validation and
	// verification failures, deadlocking all later announce handling.
	defer e.proverTrieRequestsMx.Unlock()

	if len(announce.Filter) != len(e.filter) {
		return errors.Wrap(
			errors.New("filter width mismatch"),
			"handle data announce prover leave",
		)
	}

	msg := []byte("leave")
	msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
	msg = append(msg, announce.Filter...)
	if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
		return errors.Wrap(err, "handle data announce prover leave")
	}

	// signerAddr intentionally replaces the shadowed `address` parameter from
	// the original: the request must be keyed by the signer's address.
	signerAddr, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
	if err != nil {
		return errors.Wrap(err, "handle data announce prover leave")
	}

	e.proverTrieLeaveRequests[string(signerAddr)] = string(announce.Filter)
	return nil
}
// handleDataAnnounceProverPause stages a signed prover "pause" announcement
// so it can later be applied to the prover trie. Only nodes that are
// themselves in the active prover trie (index 0) record the request; all
// others return nil without side effects.
//
// The signature covers the literal tag "pause", the frame number, and the
// filter, preventing cross-type replay. The staged request is keyed by the
// address derived from the signature, not by the caller-supplied address
// parameter.
func (e *DataClockConsensusEngine) handleDataAnnounceProverPause(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	if !e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
		return nil
	}

	announce := &protobufs.AnnounceProverPause{}
	if err := any.UnmarshalTo(announce); err != nil {
		return errors.Wrap(err, "handle data announce prover pause")
	}

	if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
		// Fixed copy-paste bug: this message previously said "leave" in the
		// pause handler.
		return errors.Wrap(
			errors.New("invalid data"),
			"handle data announce prover pause",
		)
	}

	e.proverTrieRequestsMx.Lock()
	// Deferred unlock guarantees the mutex is released on every error path;
	// the original returned while still holding the lock on validation and
	// verification failures, deadlocking all later announce handling.
	defer e.proverTrieRequestsMx.Unlock()

	if len(announce.Filter) != len(e.filter) {
		return errors.Wrap(
			errors.New("filter width mismatch"),
			"handle data announce prover pause",
		)
	}

	msg := []byte("pause")
	msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
	msg = append(msg, announce.Filter...)
	if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
		return errors.Wrap(err, "handle data announce prover pause")
	}

	// signerAddr intentionally replaces the shadowed `address` parameter from
	// the original: the request must be keyed by the signer's address.
	signerAddr, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
	if err != nil {
		return errors.Wrap(err, "handle data announce prover pause")
	}

	e.proverTriePauseRequests[string(signerAddr)] = string(announce.Filter)
	return nil
}
// handleDataAnnounceProverResume stages a signed prover "resume" announcement
// so it can later be applied to the prover trie. Only nodes that are
// themselves in the active prover trie (index 0) record the request; all
// others return nil without side effects.
//
// The signature covers the literal tag "resume", the frame number, and the
// filter, preventing cross-type replay. The staged request is keyed by the
// address derived from the signature, not by the caller-supplied address
// parameter.
func (e *DataClockConsensusEngine) handleDataAnnounceProverResume(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	if !e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
		return nil
	}

	announce := &protobufs.AnnounceProverResume{}
	if err := any.UnmarshalTo(announce); err != nil {
		return errors.Wrap(err, "handle data announce prover resume")
	}

	if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
		return errors.Wrap(
			errors.New("invalid data"),
			"handle data announce prover resume",
		)
	}

	e.proverTrieRequestsMx.Lock()
	// Deferred unlock guarantees the mutex is released on every error path;
	// the original returned while still holding the lock on validation and
	// verification failures, deadlocking all later announce handling.
	defer e.proverTrieRequestsMx.Unlock()

	if len(announce.Filter) != len(e.filter) {
		return errors.Wrap(
			errors.New("filter width mismatch"),
			"handle data announce prover resume",
		)
	}

	// signerAddr intentionally replaces the shadowed `address` parameter from
	// the original: the request must be keyed by the signer's address.
	signerAddr, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
	if err != nil {
		return errors.Wrap(err, "handle data announce prover resume")
	}

	msg := []byte("resume")
	msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
	msg = append(msg, announce.Filter...)
	if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
		return errors.Wrap(err, "handle data announce prover resume")
	}

	e.proverTrieResumeRequests[string(signerAddr)] = string(announce.Filter)
	return nil
}
func (e *DataClockConsensusEngine) handleTokenRequest(
transition *protobufs.TokenRequest,

View File

@ -101,6 +101,7 @@ func (e *DataClockConsensusEngine) HandlePreMidnightMint(
) (*protobufs.PreMidnightMintResponse, error) {
addr, err := e.handleMint(t)
if err != nil {
e.logger.Error("error while handling pre-midnight mint", zap.Error(err))
return nil, err
}
@ -222,13 +223,18 @@ func (e *DataClockConsensusEngine) handleMint(
return nil, errors.Wrap(application.ErrInvalidStateTransition, "handle mint")
}
e.logger.Debug(
"got pre-midnight mint request",
zap.String("peer", peerId.String()),
)
if len(t.Proofs) >= 3 &&
len(t.Proofs) < 204 &&
bytes.Equal(
t.Proofs[0],
[]byte("pre-dusk"),
) && (!bytes.Equal(t.Proofs[1], make([]byte, 32)) ||
head.FrameNumber < 60480) && e.GetFrameProverTries()[0].Contains(
head.FrameNumber < 67000) && e.GetFrameProverTries()[0].Contains(
e.provingKeyAddress,
) {
prevInput := []byte{}
@ -250,7 +256,9 @@ func (e *DataClockConsensusEngine) handleMint(
return nil, errors.Wrap(application.ErrInvalidStateTransition, "handle mint")
}
if pre.Difficulty == 0 {
_, pr, err := e.coinStore.GetPreCoinProofsForOwner(t.Proofs[0][32:])
_, pr, err := e.coinStore.GetPreCoinProofsForOwner(
altAddr.FillBytes(make([]byte, 32)),
)
if err != nil && !errors.Is(err, store.ErrNotFound) {
return nil, errors.Wrap(application.ErrInvalidStateTransition, "handle mint")
}
@ -505,11 +513,8 @@ func (e *DataClockConsensusEngine) handleMint(
txn.Abort()
return nil, errors.Wrap(err, "handle mint")
}
e.stagedTransactionsMx.Lock()
if e.stagedTransactions == nil {
e.stagedTransactions = &protobufs.TokenRequests{}
}
e.stagedTransactions.Requests = append(e.stagedTransactions.Requests,
err = e.insertMessage(
e.filter,
&protobufs.TokenRequest{
Request: &protobufs.TokenRequest_Mint{
Mint: &protobufs.MintCoinRequest{
@ -522,8 +527,12 @@ func (e *DataClockConsensusEngine) handleMint(
},
},
},
})
e.stagedTransactionsMx.Unlock()
},
)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "handle mint")
}
}
if len(deletes) == 1 {

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
@ -61,72 +62,110 @@ func (e *DataClockConsensusEngine) runPreMidnightProofWorker() {
panic(errors.Wrap(err, "error getting peer id"))
}
outer:
for {
frame, err := e.dataTimeReel.Head()
tries := e.GetFrameProverTries()
e.peerMapMx.RLock()
wait := false
for _, v := range e.peerMap {
if v.maxFrame-10 > frame.FrameNumber {
wait = true
}
}
e.peerMapMx.RUnlock()
if len(tries) == 0 || wait {
if len(tries) == 0 || e.pubSub.GetNetworkPeersCount() < 3 {
e.logger.Info("waiting for more peer info to appear")
time.Sleep(10 * time.Second)
continue
}
cc, err := e.pubSub.GetDirectChannel([]byte(peerId), "worker")
if err != nil {
e.logger.Info(
"could not establish direct channel, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
continue
_, prfs, err := e.coinStore.GetPreCoinProofsForOwner(addr)
if err != nil && !errors.Is(err, store.ErrNotFound) {
e.logger.Error("error while fetching pre-coin proofs", zap.Error(err))
return
}
if len(prfs) != 0 {
e.logger.Info("already completed pre-midnight mint")
return
}
break
}
resume := make([]byte, 32)
cc, err := e.pubSub.GetDirectChannel([]byte(peerId), "worker")
if err != nil {
e.logger.Info(
"could not establish direct channel, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
}
for {
if e.state >= consensus.EngineStateStopping || e.state == consensus.EngineStateStopped {
break
}
_, prfs, err := e.coinStore.GetPreCoinProofsForOwner(addr)
if err != nil && !errors.Is(err, store.ErrNotFound) {
e.logger.Error("error while fetching pre-coin proofs", zap.Error(err))
return
}
if len(prfs) != 0 {
e.logger.Info("already completed pre-midnight mint")
return
}
if cc == nil {
cc, err = e.pubSub.GetDirectChannel([]byte(peerId), "worker")
if err != nil {
e.logger.Info(
"could not establish direct channel, waiting...",
zap.Error(err),
)
cc = nil
time.Sleep(10 * time.Second)
continue
}
}
client := protobufs.NewDataServiceClient(cc)
status, err := client.GetPreMidnightMintStatus(
context.Background(),
&protobufs.PreMidnightMintStatusRequest{
Owner: addr,
},
grpc.MaxCallRecvMsgSize(600*1024*1024),
)
if err != nil || status == nil {
e.logger.Error(
"got error response, waiting...",
zap.Error(err),
if bytes.Equal(resume, make([]byte, 32)) {
status, err := client.GetPreMidnightMintStatus(
context.Background(),
&protobufs.PreMidnightMintStatusRequest{
Owner: addr,
},
grpc.MaxCallSendMsgSize(1*1024*1024),
grpc.MaxCallRecvMsgSize(1*1024*1024),
)
time.Sleep(10 * time.Second)
cc.Close()
continue
}
if err != nil || status == nil {
e.logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
cc.Close()
cc = nil
err = e.pubSub.Reconnect([]byte(peerId))
if err != nil {
e.logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
}
continue
}
resume := status.Address
resume = status.Address
if status.Increment != 0 {
increment = status.Increment - 1
} else if !bytes.Equal(status.Address, make([]byte, 32)) {
increment = 0
}
}
proofs := [][]byte{
[]byte("pre-dusk"),
resume,
}
if status.Increment != 0 {
increment = status.Increment - 1
}
if status.Increment == 0 && !bytes.Equal(status.Address, make([]byte, 32)) {
e.logger.Info("already completed pre-midnight mint")
cc.Close()
return
}
batchCount := 0
// the cast is important, it underflows without:
for i := int(increment); i >= 0; i-- {
@ -151,6 +190,7 @@ outer:
zap.Int("increment", i),
)
cc.Close()
cc = nil
return
}
@ -179,16 +219,40 @@ outer:
Signature: sig,
},
},
grpc.MaxCallSendMsgSize(1*1024*1024),
grpc.MaxCallRecvMsgSize(1*1024*1024),
)
if err != nil {
if strings.Contains(
err.Error(),
application.ErrInvalidStateTransition.Error(),
) && i == 0 {
resume = make([]byte, 32)
e.logger.Info("pre-midnight proofs submitted, returning")
cc.Close()
cc = nil
return
}
e.logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
resume = make([]byte, 32)
cc.Close()
continue outer
cc = nil
time.Sleep(10 * time.Second)
err = e.pubSub.Reconnect([]byte(peerId))
if err != nil {
e.logger.Error(
"got error response, waiting...",
zap.Error(err),
)
time.Sleep(10 * time.Second)
}
break
}
resume = resp.Address
@ -201,11 +265,15 @@ outer:
if i == 0 {
e.logger.Info("pre-midnight proofs submitted, returning")
cc.Close()
cc = nil
return
} else {
increment = uint32(i) - 1
}
break
}
}
cc.Close()
}
}

View File

@ -19,6 +19,7 @@ import (
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
@ -67,9 +68,11 @@ func (pubsub) GetNetworkInfo() *protobufs.NetworkInfoResponse {
func (p pubsub) SignMessage(msg []byte) ([]byte, error) {
return p.privkey.Sign(rand.Reader, msg, gocrypto.Hash(0))
}
func (p pubsub) GetPublicKey() []byte { return p.pubkey }
func (pubsub) GetPeerScore(peerId []byte) int64 { return 0 }
func (pubsub) SetPeerScore(peerId []byte, score int64) {}
func (p pubsub) GetPublicKey() []byte { return p.pubkey }
func (pubsub) GetPeerScore(peerId []byte) int64 { return 0 }
func (pubsub) SetPeerScore(peerId []byte, score int64) {}
func (pubsub) AddPeerScore(peerId []byte, scoreDelta int64) {}
func (pubsub) Reconnect(peerId []byte) error { return nil }
type outputs struct {
difficulty uint32
@ -128,7 +131,7 @@ func TestHandlePreMidnightMint(t *testing.T) {
peerInfoManager: nil,
peerSeniority: newFromMap(map[string]uint64{}),
messageProcessorCh: make(chan *pb.Message),
engineConfig: nil,
config: nil,
preMidnightMint: map[string]struct{}{},
}
@ -623,7 +626,23 @@ func TestHandlePreMidnightMint(t *testing.T) {
}
}
assert.Len(t, d.stagedTransactions.Requests, 1)
req := <-d.messageProcessorCh
assert.NotNil(t, req)
message := &protobufs.Message{}
err = proto.Unmarshal(req.Data, message)
assert.NoError(t, err)
appMsg := &anypb.Any{}
err = proto.Unmarshal(message.Payload, appMsg)
assert.NoError(t, err)
tr := &protobufs.TokenRequest{}
err = proto.Unmarshal(appMsg.Value, tr)
assert.NoError(t, err)
d.stagedTransactions = &protobufs.TokenRequests{
Requests: []*protobufs.TokenRequest{tr},
}
// confirm operation cannot occur twice:
d.stagedTransactions.Requests = append(
d.stagedTransactions.Requests,

View File

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/hex"
"math/big"
"sort"
"sync"
lru "github.com/hashicorp/golang-lru/v2"
@ -49,12 +48,12 @@ type DataTimeReel struct {
headDistance *big.Int
lruFrames *lru.Cache[string, string]
proverTries []*tries.RollingFrecencyCritbitTrie
pending map[uint64][]*pendingFrame
incompleteForks map[uint64][]*pendingFrame
frames chan *pendingFrame
newFrameCh chan *protobufs.ClockFrame
badFrameCh chan *protobufs.ClockFrame
done chan bool
// pending map[uint64][]*pendingFrame
incompleteForks map[uint64][]*pendingFrame
frames chan *pendingFrame
newFrameCh chan *protobufs.ClockFrame
badFrameCh chan *protobufs.ClockFrame
done chan bool
}
func NewDataTimeReel(
@ -109,12 +108,12 @@ func NewDataTimeReel(
initialInclusionProof: initialInclusionProof,
initialProverKeys: initialProverKeys,
lruFrames: cache,
pending: make(map[uint64][]*pendingFrame),
incompleteForks: make(map[uint64][]*pendingFrame),
frames: make(chan *pendingFrame),
newFrameCh: make(chan *protobufs.ClockFrame),
badFrameCh: make(chan *protobufs.ClockFrame),
done: make(chan bool),
// pending: make(map[uint64][]*pendingFrame),
incompleteForks: make(map[uint64][]*pendingFrame),
frames: make(chan *pendingFrame),
newFrameCh: make(chan *protobufs.ClockFrame),
badFrameCh: make(chan *protobufs.ClockFrame),
done: make(chan bool),
}
}
@ -138,6 +137,7 @@ func (d *DataTimeReel) Start() error {
d.headDistance, err = d.GetDistance(frame)
}
d.running = true
go d.runLoop()
return nil
@ -169,11 +169,11 @@ func (d *DataTimeReel) Insert(frame *protobufs.ClockFrame, isSync bool) error {
zap.String("output_tag", hex.EncodeToString(frame.Output[:64])),
)
if d.lruFrames.Contains(string(frame.Output[:64])) {
return nil
}
// if d.lruFrames.Contains(string(frame.Output[:64])) {
// return nil
// }
d.lruFrames.Add(string(frame.Output[:64]), string(frame.ParentSelector))
// d.lruFrames.Add(string(frame.Output[:64]), string(frame.ParentSelector))
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := frame.GetSelector()
@ -183,16 +183,18 @@ func (d *DataTimeReel) Insert(frame *protobufs.ClockFrame, isSync bool) error {
distance, _ := d.GetDistance(frame)
d.storePending(selector, parent, distance, frame)
if d.head.FrameNumber < frame.FrameNumber {
d.storePending(selector, parent, distance, frame)
if isSync {
go func() {
d.frames <- &pendingFrame{
selector: selector,
parentSelector: parent,
frameNumber: frame.FrameNumber,
}
}()
if d.head.FrameNumber+1 == frame.FrameNumber {
go func() {
d.frames <- &pendingFrame{
selector: selector,
parentSelector: parent,
frameNumber: frame.FrameNumber,
}
}()
}
}
return nil
@ -268,6 +270,17 @@ func (d *DataTimeReel) createGenesisFrame() (
panic(err)
}
err = txn.Commit()
if err != nil {
txn.Abort()
panic(err)
}
txn, err = d.clockStore.NewTransaction()
if err != nil {
panic(err)
}
if err := d.clockStore.CommitDataClockFrame(
d.filter,
0,
@ -288,7 +301,6 @@ func (d *DataTimeReel) createGenesisFrame() (
// Main data consensus loop
func (d *DataTimeReel) runLoop() {
d.running = true
for {
select {
case frame := <-d.frames:
@ -312,11 +324,11 @@ func (d *DataTimeReel) runLoop() {
if d.head.FrameNumber < rawFrame.FrameNumber {
d.logger.Debug("frame is higher")
parent := new(big.Int).SetBytes(rawFrame.ParentSelector)
selector, err := rawFrame.GetSelector()
if err != nil {
panic(err)
}
// parent := new(big.Int).SetBytes(rawFrame.ParentSelector)
// selector, err := rawFrame.GetSelector()
// if err != nil {
// panic(err)
// }
distance, err := d.GetDistance(rawFrame)
if err != nil {
@ -324,26 +336,14 @@ func (d *DataTimeReel) runLoop() {
panic(err)
}
d.addPending(selector, parent, frame.frameNumber)
// d.addPending(selector, parent, frame.frameNumber)
d.processPending(d.head, frame)
continue
}
headSelector, err := d.head.GetSelector()
if err != nil {
panic(err)
}
// If the frame has a gap from the head or is not descendent, mark it as
// pending:
if rawFrame.FrameNumber-d.head.FrameNumber != 1 {
d.logger.Debug(
"frame has has gap, fork choice",
zap.Bool("has_gap", rawFrame.FrameNumber-d.head.FrameNumber != 1),
zap.String("parent_selector", parent.Text(16)),
zap.String("head_selector", headSelector.Text(16)),
)
d.processPending(d.head, frame)
continue
}
@ -359,58 +359,68 @@ func (d *DataTimeReel) runLoop() {
continue
}
distance, err := d.GetDistance(rawFrame)
if err != nil {
panic(err)
}
d.logger.Debug(
"frame is same height",
zap.String("head_distance", d.headDistance.Text(16)),
zap.String("distance", distance.Text(16)),
)
// temp: remove fork choice until prover ring testing
// distance, err := d.GetDistance(rawFrame)
// if err != nil {
// panic(err)
// }
// d.logger.Debug(
// "frame is same height",
// zap.String("head_distance", d.headDistance.Text(16)),
// zap.String("distance", distance.Text(16)),
// )
// Optimization: if competing frames share a parent we can short-circuit
// fork choice
if bytes.Equal(d.head.ParentSelector, rawFrame.ParentSelector) &&
distance.Cmp(d.headDistance) < 0 {
d.logger.Debug(
"frame shares parent, has shorter distance, short circuit",
)
d.totalDistance.Sub(d.totalDistance, d.headDistance)
d.setHead(rawFrame, distance)
d.processPending(d.head, frame)
continue
}
// // Optimization: if competing frames share a parent we can short-circuit
// // fork choice
// if bytes.Equal(d.head.ParentSelector, rawFrame.ParentSelector) &&
// distance.Cmp(d.headDistance) < 0 {
// d.logger.Debug(
// "frame shares parent, has shorter distance, short circuit",
// )
// d.totalDistance.Sub(d.totalDistance, d.headDistance)
// d.setHead(rawFrame, distance)
// d.processPending(d.head, frame)
// continue
// }
// Choose fork
d.forkChoice(rawFrame, distance)
// d.forkChoice(rawFrame, distance)
d.processPending(d.head, frame)
} else {
d.logger.Debug("frame is lower height")
// d.logger.Debug("frame is lower height")
existing, _, err := d.clockStore.GetDataClockFrame(
d.filter,
rawFrame.FrameNumber,
true,
)
if err != nil {
// if this returns an error it's either not found (which shouldn't
// happen without corruption) or pebble is borked, either way, panic
panic(err)
}
// existing, _, err := d.clockStore.GetDataClockFrame(
// d.filter,
// rawFrame.FrameNumber,
// true,
// )
// if err != nil {
// // if this returns an error it's either not found (which shouldn't
// // happen without corruption) or pebble is borked, either way, panic
// panic(err)
// }
// It's a fork, but it's behind. We need to stash it until it catches
// up (or dies off)
if !bytes.Equal(existing.Output, rawFrame.Output) {
d.logger.Debug("is fork, add pending")
parent, selector, err := rawFrame.GetParentAndSelector()
if err != nil {
panic(err)
}
// if !bytes.Equal(existing.Output, rawFrame.Output) {
// parent, selector, err := rawFrame.GetParentAndSelector()
// if err != nil {
// panic(err)
// }
d.addPending(selector, parent, frame.frameNumber)
d.processPending(d.head, frame)
}
// if bytes.Equal(existing.ParentSelector, rawFrame.ParentSelector) {
// ld := d.getTotalDistance(existing)
// rd := d.getTotalDistance(rawFrame)
// if rd.Cmp(ld) < 0 {
// d.forkChoice(rawFrame, rd)
// d.processPending(d.head, frame)
// } else {
// d.addPending(selector, parent, frame.frameNumber)
// d.processPending(d.head, frame)
// }
// } else {
// d.addPending(selector, parent, frame.frameNumber)
// d.processPending(d.head, frame)
// }
// }
}
case <-d.done:
return
@ -418,49 +428,49 @@ func (d *DataTimeReel) runLoop() {
}
}
func (d *DataTimeReel) addPending(
selector *big.Int,
parent *big.Int,
frameNumber uint64,
) {
d.logger.Debug(
"add pending",
zap.Uint64("head_frame_number", d.head.FrameNumber),
zap.Uint64("add_frame_number", frameNumber),
zap.String("selector", selector.Text(16)),
zap.String("parent", parent.Text(16)),
)
// func (d *DataTimeReel) addPending(
// selector *big.Int,
// parent *big.Int,
// frameNumber uint64,
// ) {
// // d.logger.Debug(
// // "add pending",
// // zap.Uint64("head_frame_number", d.head.FrameNumber),
// // zap.Uint64("add_frame_number", frameNumber),
// // zap.String("selector", selector.Text(16)),
// // zap.String("parent", parent.Text(16)),
// // )
if d.head.FrameNumber <= frameNumber {
if _, ok := d.pending[frameNumber]; !ok {
d.pending[frameNumber] = []*pendingFrame{}
}
// if d.head.FrameNumber <= frameNumber {
// if _, ok := d.pending[frameNumber]; !ok {
// d.pending[frameNumber] = []*pendingFrame{}
// }
// avoid heavy thrashing
for _, frame := range d.pending[frameNumber] {
if frame.selector.Cmp(selector) == 0 {
d.logger.Debug("exists in pending already")
return
}
}
}
// // avoid heavy thrashing
// for _, frame := range d.pending[frameNumber] {
// if frame.selector.Cmp(selector) == 0 {
// d.logger.Debug("exists in pending already")
// return
// }
// }
// }
if d.head.FrameNumber <= frameNumber {
d.logger.Debug(
"accumulate in pending",
zap.Int("pending_neighbors", len(d.pending[frameNumber])),
)
// if d.head.FrameNumber <= frameNumber {
// // d.logger.Debug(
// // "accumulate in pending",
// // zap.Int("pending_neighbors", len(d.pending[frameNumber])),
// // )
d.pending[frameNumber] = append(
d.pending[frameNumber],
&pendingFrame{
selector: selector,
parentSelector: parent,
frameNumber: frameNumber,
},
)
}
}
// d.pending[frameNumber] = append(
// d.pending[frameNumber],
// &pendingFrame{
// selector: selector,
// parentSelector: parent,
// frameNumber: frameNumber,
// },
// )
// }
// }
func (d *DataTimeReel) storePending(
selector *big.Int,
@ -506,62 +516,69 @@ func (d *DataTimeReel) processPending(
frame *protobufs.ClockFrame,
lastReceived *pendingFrame,
) {
d.logger.Debug(
"process pending",
zap.Int("pending_frame_numbers", len(d.pending)),
)
frameNumbers := []uint64{}
for f := range d.pending {
frameNumbers = append(frameNumbers, f)
d.logger.Debug(
"pending per frame number",
zap.Uint64("pending_frame_number", f),
zap.Int("pending_frames", len(d.pending[f])),
)
}
sort.Slice(frameNumbers, func(i, j int) bool {
return frameNumbers[i] > frameNumbers[j]
})
// d.logger.Debug(
// "process pending",
// zap.Uint64("head_frame", frame.FrameNumber),
// zap.Uint64("last_received_frame", lastReceived.frameNumber),
// zap.Int("pending_frame_numbers", len(d.pending)),
// )
lastSelector := lastReceived.selector
for _, f := range frameNumbers {
if f < d.head.FrameNumber {
delete(d.pending, f)
for {
next := d.head.FrameNumber + 1
sel, err := d.head.GetSelector()
if err != nil {
panic(err)
}
nextPending := d.pending[f]
d.logger.Debug(
"checking frame set",
zap.Uint64("pending_frame_number", f),
zap.Uint64("frame_number", frame.FrameNumber),
)
if f < frame.FrameNumber {
d.logger.Debug(
"purging frame set",
zap.Uint64("pending_frame_number", f),
zap.Uint64("frame_number", frame.FrameNumber),
)
delete(d.pending, f)
continue
}
selector := sel.FillBytes(make([]byte, 32))
// d.logger.Debug(
// "checking frame set",
// zap.Uint64("pending_frame_number", f),
// zap.Uint64("frame_number", frame.FrameNumber),
// )
// Pull the next
for len(nextPending) != 0 {
d.logger.Debug("try process next")
next := nextPending[0]
d.pending[f] = d.pending[f][1:]
if f == lastReceived.frameNumber && next.selector.Cmp(lastSelector) == 0 {
d.pending[f] = append(d.pending[f], next)
if len(d.pending[f]) == 1 {
nextPending = nil
}
d.logger.Debug("try process next")
//// todo: revise for prover rings
rawFrames, err := d.clockStore.GetStagedDataClockFramesForFrameNumber(
d.filter,
next,
)
if err != nil {
panic(err)
}
found := false
for _, rawFrame := range rawFrames {
if !bytes.Equal(rawFrame.ParentSelector, selector) {
continue
}
go func() {
d.frames <- next
}()
return
d.logger.Debug(
"processing frame",
zap.Uint64("frame_number", rawFrame.FrameNumber),
zap.String("output_tag", hex.EncodeToString(rawFrame.Output[:64])),
zap.Uint64("head_number", d.head.FrameNumber),
zap.String("head_output_tag", hex.EncodeToString(d.head.Output[:64])),
)
distance, err := d.GetDistance(rawFrame)
if err != nil {
if !errors.Is(err, store.ErrNotFound) {
panic(err)
}
continue
}
// Otherwise set it as the next and process all pending
d.setHead(rawFrame, distance)
found = true
break
}
if !found {
break
}
}
}
@ -715,7 +732,7 @@ func (d *DataTimeReel) forkChoice(
zap.Uint64("head_number", d.head.FrameNumber),
zap.String("head_output_tag", hex.EncodeToString(d.head.Output[:64])),
)
parentSelector, selector, err := frame.GetParentAndSelector()
_, selector, err := frame.GetParentAndSelector()
if err != nil {
panic(err)
}
@ -748,7 +765,7 @@ func (d *DataTimeReel) forkChoice(
if err != nil {
// If lineage cannot be verified, set it for later
if errors.Is(err, store.ErrNotFound) {
d.addPending(selector, parentSelector, frame.FrameNumber)
// d.addPending(selector, parentSelector, frame.FrameNumber)
return
} else {
panic(err)
@ -811,7 +828,7 @@ func (d *DataTimeReel) forkChoice(
if err != nil {
// If lineage cannot be verified, set it for later
if errors.Is(err, store.ErrNotFound) {
d.addPending(selector, parentSelector, frame.FrameNumber)
// d.addPending(selector, parentSelector, frame.FrameNumber)
return
} else {
panic(err)
@ -845,7 +862,7 @@ func (d *DataTimeReel) forkChoice(
zap.String("right_total", rightTotal.Text(16)),
zap.String("left_total", overweight.Text(16)),
)
d.addPending(selector, parentSelector, frame.FrameNumber)
// d.addPending(selector, parentSelector, frame.FrameNumber)
return
}

View File

@ -3,6 +3,7 @@ package time_test
import (
"bytes"
"fmt"
"math/rand"
"strings"
"sync"
"testing"
@ -139,38 +140,6 @@ func TestDataTimeReel(t *testing.T) {
frame, err := m.Head()
assert.NoError(t, err)
frames := []*protobufs.ClockFrame{}
wg := sync.WaitGroup{}
wg.Add(1)
frameCh := m.NewFrameCh()
go func() {
for i := 0; i < 40; i++ {
frames = append(frames, <-frameCh)
}
wg.Done()
}()
// in order
for i := int64(0); i < 40; i++ {
frame, err = prover.ProveMasterClockFrame(
frame,
i+1,
10,
[]*protobufs.InclusionAggregateProof{},
)
assert.NoError(t, err)
err := m.Insert(frame, false)
assert.NoError(t, err)
}
wg.Wait()
for i := 0; i < 40; i++ {
assert.NotNil(t, frames[i])
assert.Equal(t, frames[i].FrameNumber, uint64(i+1))
}
filterBytes := []byte{
0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
@ -201,7 +170,7 @@ func TestDataTimeReel(t *testing.T) {
},
prover,
func(txn store.Transaction, frame *protobufs.ClockFrame) error { return nil },
frames[0].Output,
bytes.Repeat([]byte{0x00}, 516),
&qcrypto.InclusionAggregateProof{
InclusionCommitments: []*qcrypto.InclusionCommitment{},
AggregateCommitment: []byte{},
@ -220,29 +189,25 @@ func TestDataTimeReel(t *testing.T) {
datawg := sync.WaitGroup{}
datawg.Add(1)
dataFrameCh := d.NewFrameCh()
targetFrameParentSelector := []byte{}
go func() {
loop:
for {
frame := <-dataFrameCh
dataFrames = append(dataFrames, frame)
if frame.FrameNumber == 40 && bytes.Equal(
frame.ParentSelector,
targetFrameParentSelector,
) {
break
select {
case frame := <-dataFrameCh:
dataFrames = append(dataFrames, frame)
if frame.FrameNumber == 500 {
break loop
}
}
}
datawg.Done()
}()
prev := make([]byte, 32)
// 1. z-dist optimal proof submission is strictly master-frame evoked leader
for i := int64(0); i < 10; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
for i := int64(0); i < 100; i++ {
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
prev,
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
@ -256,18 +221,17 @@ func TestDataTimeReel(t *testing.T) {
10,
)
d.Insert(frame, false)
prevBI, _ := frame.GetSelector()
prev = prevBI.FillBytes(make([]byte, 32))
}
// 2. z-dist optimal, out of order proof submission is strictly master-frame
// evoked leader, but arrived completely backwards
insertFrames := []*protobufs.ClockFrame{}
for i := int64(10); i < 20; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
for i := int64(100); i < 200; i++ {
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
prev,
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
@ -281,9 +245,12 @@ func TestDataTimeReel(t *testing.T) {
10,
)
insertFrames = append(insertFrames, frame)
prevBI, _ := frame.GetSelector()
prev = prevBI.FillBytes(make([]byte, 32))
}
for i := 9; i >= 0; i-- {
for i := 99; i >= 0; i-- {
err := d.Insert(insertFrames[i], false)
assert.NoError(t, err)
}
@ -291,12 +258,9 @@ func TestDataTimeReel(t *testing.T) {
// 3. 90% optimal, out of order
insertFrames = []*protobufs.ClockFrame{}
for i := int64(20); i < 25; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
for i := int64(200); i < 300; i++ {
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
prev,
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
@ -310,13 +274,13 @@ func TestDataTimeReel(t *testing.T) {
10,
)
d.Insert(frame, false)
prevBI, _ := frame.GetSelector()
prev = prevBI.FillBytes(make([]byte, 32))
}
masterSelector, err := frames[25].GetSelector()
assert.NoError(t, err)
proverSelections := proverTrie.FindNearestAndApproximateNeighbors(
masterSelector.FillBytes(make([]byte, 32)),
prev,
)
suboptimalSigner2, _ := keyManager.GetSigningKey(
addrMap[string(proverSelections[2].External.Key)],
@ -328,17 +292,17 @@ func TestDataTimeReel(t *testing.T) {
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
suboptimalSigner2,
26,
301,
10,
)
insertFrames = append(insertFrames, frame)
for i := int64(26); i < 30; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
prevBI, _ := frame.GetSelector()
prev = prevBI.FillBytes(make([]byte, 32))
for i := int64(301); i < 400; i++ {
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
prev,
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
@ -352,9 +316,11 @@ func TestDataTimeReel(t *testing.T) {
10,
)
insertFrames = append(insertFrames, frame)
prevBI, _ := frame.GetSelector()
prev = prevBI.FillBytes(make([]byte, 32))
}
for i := 4; i >= 0; i-- {
for i := 99; i >= 0; i-- {
err := d.Insert(insertFrames[i], false)
assert.NoError(t, err)
}
@ -365,12 +331,9 @@ func TestDataTimeReel(t *testing.T) {
conflictFrames := []*protobufs.ClockFrame{}
optimalKeySet := [][]byte{}
suppressedFrame := frame
for i := int64(30); i < 40; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
for i := int64(400); i < 500; i++ {
proverSelections := proverTrie.FindNearestAndApproximateNeighbors(
masterSelector.FillBytes(make([]byte, 32)),
prev,
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelections[0].External.Key)],
@ -392,10 +355,10 @@ func TestDataTimeReel(t *testing.T) {
i+1,
10,
)
prevBI, _ := suppressedFrame.GetSelector()
prev = prevBI.FillBytes(make([]byte, 32))
insertFrames = append(insertFrames, suppressedFrame)
if i == 39 {
targetFrameParentSelector = suppressedFrame.ParentSelector
}
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
@ -407,15 +370,20 @@ func TestDataTimeReel(t *testing.T) {
conflictFrames = append(conflictFrames, frame)
}
for i := 9; i >= 0; i-- {
err := d.Insert(conflictFrames[i], false)
// force linear ordering
gotime.Sleep(1 * gotime.Second)
assert.NoError(t, err)
}
rand.Shuffle(100, func(i, j int) {
insertFrames[i], insertFrames[j] = insertFrames[j], insertFrames[i]
})
// todo: restore with prover rings
// for i := 99; i >= 0; i-- {
// err := d.Insert(conflictFrames[i], false)
// // force linear ordering
// gotime.Sleep(1 * gotime.Second)
// assert.NoError(t, err)
// }
// Someone is honest, but running backwards:
for i := 9; i >= 0; i-- {
for i := 99; i >= 0; i-- {
err := d.Insert(insertFrames[i], false)
gotime.Sleep(1 * gotime.Second)
assert.NoError(t, err)
@ -423,7 +391,7 @@ func TestDataTimeReel(t *testing.T) {
datawg.Wait()
assert.Equal(t, uint64(40), dataFrames[len(dataFrames)-1].FrameNumber)
assert.Equal(t, uint64(500), dataFrames[len(dataFrames)-1].FrameNumber)
assert.Equal(
t,
optimalKeySet[len(optimalKeySet)-1],

View File

@ -115,7 +115,7 @@ func (a *TokenApplication) handleMint(
},
}
return outputs, nil
} else if len(t.Proofs) != 3 && currentFrameNumber > 60480 {
} else if len(t.Proofs) != 3 && currentFrameNumber > 77000 {
if _, touched := lockMap[string(t.Signature.PublicKey.KeyValue)]; touched {
return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
}

View File

@ -3,7 +3,6 @@ package token
import (
"bytes"
"crypto"
"encoding/binary"
"encoding/hex"
"strings"
"sync"
@ -162,7 +161,7 @@ func NewTokenExecutionEngine(
)
e.clock = data.NewDataClockConsensusEngine(
cfg.Engine,
cfg,
logger,
keyManager,
clockStore,
@ -316,26 +315,26 @@ func NewTokenExecutionEngine(
}
if err == nil {
msg := []byte("resume")
msg = binary.BigEndian.AppendUint64(msg, f.FrameNumber)
msg = append(msg, e.intrinsicFilter...)
sig, err := e.pubSub.SignMessage(msg)
if err != nil {
panic(err)
}
// msg := []byte("resume")
// msg = binary.BigEndian.AppendUint64(msg, f.FrameNumber)
// msg = append(msg, e.intrinsicFilter...)
// sig, err := e.pubSub.SignMessage(msg)
// if err != nil {
// panic(err)
// }
// need to wait for peering
gotime.Sleep(30 * gotime.Second)
e.publishMessage(e.intrinsicFilter, &protobufs.AnnounceProverResume{
Filter: e.intrinsicFilter,
FrameNumber: f.FrameNumber,
PublicKeySignatureEd448: &protobufs.Ed448Signature{
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.pubSub.GetPublicKey(),
},
Signature: sig,
},
})
// // need to wait for peering
// gotime.Sleep(30 * gotime.Second)
// e.publishMessage(e.intrinsicFilter, &protobufs.AnnounceProverResume{
// Filter: e.intrinsicFilter,
// FrameNumber: f.FrameNumber,
// PublicKeySignatureEd448: &protobufs.Ed448Signature{
// PublicKey: &protobufs.Ed448PublicKey{
// KeyValue: e.pubSub.GetPublicKey(),
// },
// Signature: sig,
// },
// })
}
}

View File

@ -11,6 +11,8 @@ import (
"io/fs"
"log"
"math/big"
"net/http"
npprof "net/http/pprof"
"os"
"os/exec"
"os/signal"
@ -77,6 +79,11 @@ var (
"",
"write memory profile after 20m to this file",
)
pprofServer = flag.String(
"pprof-server",
"",
"enable pprof server on specified address (e.g. localhost:6060)",
)
nodeInfo = flag.Bool(
"node-info",
false,
@ -195,7 +202,7 @@ func main() {
count++
}
if count < len(config.Signatories)/2+len(config.Signatories)%2 {
if count < ((len(config.Signatories)-4)/2)+((len(config.Signatories)-4)%2) {
fmt.Printf("Quorum on signatures not met")
os.Exit(1)
}
@ -225,10 +232,23 @@ func main() {
if err != nil {
log.Fatal(err)
}
defer f.Close()
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
if *pprofServer != "" && *core == 0 {
go func() {
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", npprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", npprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", npprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", npprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", npprof.Trace)
log.Fatal(http.ListenAndServe(*pprofServer, mux))
}()
}
if *balance {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
@ -332,7 +352,7 @@ func main() {
}
if *core != 0 {
runtime.GOMAXPROCS(2)
// runtime.GOMAXPROCS(2)
rdebug.SetGCPercent(9999)
if nodeConfig.Engine.DataWorkerMemoryLimit == 0 {
@ -429,7 +449,7 @@ func main() {
return
}
runtime.GOMAXPROCS(1)
// runtime.GOMAXPROCS(1)
if nodeConfig.ListenGRPCMultiaddr != "" {
srv, err := rpc.NewRPCServer(
@ -661,7 +681,8 @@ func printNodeInfo(cfg *config.Config) {
conn, err := app.ConnectToNode(cfg)
if err != nil {
panic(err)
fmt.Println("Could not connect to node. If it is still booting, please wait.")
os.Exit(1)
}
defer conn.Close()

View File

@ -31,6 +31,7 @@ import (
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
"github.com/libp2p/go-libp2p/p2p/net/swarm"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/mr-tron/base58"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
@ -70,7 +71,7 @@ var BITMASK_ALL = []byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}
var ANNOUNCE_PREFIX = "quilibrium-2.0.0-dusk-"
var ANNOUNCE_PREFIX = "quilibrium-2.0.2-dusk-"
func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
@ -92,6 +93,80 @@ func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
return id
}
func NewBlossomSubStreamer(
p2pConfig *config.P2PConfig,
logger *zap.Logger,
) *BlossomSub {
ctx := context.Background()
opts := []libp2pconfig.Option{
libp2p.ListenAddrStrings(p2pConfig.ListenMultiaddr),
}
bootstrappers := []peer.AddrInfo{}
peerinfo, err := peer.AddrInfoFromString("/ip4/185.209.178.191/udp/8336/quic-v1/p2p/QmcKQjpQmLpbDsiif2MuakhHFyxWvqYauPsJDaXnLav7PJ")
if err != nil {
panic(err)
}
bootstrappers = append(bootstrappers, *peerinfo)
var privKey crypto.PrivKey
if p2pConfig.PeerPrivKey != "" {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
}
privKey, err = crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
}
opts = append(opts, libp2p.Identity(privKey))
}
bs := &BlossomSub{
ctx: ctx,
logger: logger,
bitmaskMap: make(map[string]*blossomsub.Bitmask),
signKey: privKey,
peerScore: make(map[string]int64),
isBootstrapPeer: false,
network: p2pConfig.Network,
}
h, err := libp2p.New(opts...)
if err != nil {
panic(errors.Wrap(err, "error constructing p2p"))
}
logger.Info("established peer id", zap.String("peer_id", h.ID().String()))
kademliaDHT := initDHT(
ctx,
p2pConfig,
logger,
h,
false,
bootstrappers,
)
routingDiscovery := routing.NewRoutingDiscovery(kademliaDHT)
util.Advertise(ctx, routingDiscovery, getNetworkNamespace(p2pConfig.Network))
if err != nil {
panic(err)
}
peerID := h.ID()
bs.peerID = peerID
bs.h = h
bs.signKey = privKey
return bs
}
func NewBlossomSub(
p2pConfig *config.P2PConfig,
logger *zap.Logger,
@ -163,6 +238,23 @@ func NewBlossomSub(
opts = append(opts, libp2p.Identity(privKey))
}
allowedPeers := []peer.AddrInfo{}
allowedPeers = append(allowedPeers, bootstrappers...)
directPeers := []peer.AddrInfo{}
if len(p2pConfig.DirectPeers) > 0 {
logger.Info("found direct peers in config")
for _, peerAddr := range p2pConfig.DirectPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
}
logger.Info("adding direct peer", zap.String("peer", peerinfo.ID.String()))
directPeers = append(directPeers, *peerinfo)
}
}
allowedPeers = append(allowedPeers, directPeers...)
if p2pConfig.LowWatermarkConnections != 0 &&
p2pConfig.HighWatermarkConnections != 0 {
cm, err := connmgr.NewConnManager(
@ -176,7 +268,7 @@ func NewBlossomSub(
rm, err := resourceManager(
p2pConfig.HighWatermarkConnections,
bootstrappers,
allowedPeers,
)
if err != nil {
panic(err)
@ -223,7 +315,9 @@ func NewBlossomSub(
verifyReachability(p2pConfig)
discoverPeers(p2pConfig, ctx, logger, h, routingDiscovery)
discoverPeers(p2pConfig, ctx, logger, h, routingDiscovery, true)
go monitorPeers(ctx, logger, h)
// TODO: turn into an option flag for console logging, this is too noisy for
// default logging behavior
@ -244,6 +338,10 @@ func NewBlossomSub(
blossomsub.WithStrictSignatureVerification(true),
}
if len(directPeers) > 0 {
blossomOpts = append(blossomOpts, blossomsub.WithDirectPeers(directPeers))
}
if tracer != nil {
blossomOpts = append(blossomOpts, blossomsub.WithEventTracer(tracer))
}
@ -286,12 +384,35 @@ func NewBlossomSub(
bs.h = h
bs.signKey = privKey
allowedPeerIDs := make(map[peer.ID]struct{}, len(allowedPeers))
for _, peerInfo := range allowedPeers {
allowedPeerIDs[peerInfo.ID] = struct{}{}
}
go func() {
for {
time.Sleep(30 * time.Second)
for _, b := range bs.bitmaskMap {
bitmaskPeers := b.ListPeers()
peerCount := len(bitmaskPeers)
for _, p := range bitmaskPeers {
if _, ok := allowedPeerIDs[p]; ok {
peerCount--
}
}
if peerCount < 4 {
discoverPeers(p2pConfig, bs.ctx, logger, bs.h, routingDiscovery, false)
break
}
}
}
}()
return bs
}
// adjusted from Lotus' reference implementation, addressing
// https://github.com/libp2p/go-libp2p/issues/1640
func resourceManager(highWatermark uint, bootstrappers []peer.AddrInfo) (
func resourceManager(highWatermark uint, allowed []peer.AddrInfo) (
network.ResourceManager,
error,
) {
@ -358,18 +479,18 @@ func resourceManager(highWatermark uint, bootstrappers []peer.AddrInfo) (
)
resolver := madns.DefaultResolver
var bootstrapperMaddrs []ma.Multiaddr
for _, pi := range bootstrappers {
var allowedMaddrs []ma.Multiaddr
for _, pi := range allowed {
for _, addr := range pi.Addrs {
resolved, err := resolver.Resolve(context.Background(), addr)
if err != nil {
continue
}
bootstrapperMaddrs = append(bootstrapperMaddrs, resolved...)
allowedMaddrs = append(allowedMaddrs, resolved...)
}
}
opts = append(opts, rcmgr.WithAllowlistedMultiaddrs(bootstrapperMaddrs))
opts = append(opts, rcmgr.WithAllowlistedMultiaddrs(allowedMaddrs))
mgr, err := rcmgr.NewResourceManager(limiter, opts...)
if err != nil {
@ -477,6 +598,79 @@ func (b *BlossomSub) GetRandomPeer(bitmask []byte) ([]byte, error) {
return []byte(peers[sel.Int64()]), nil
}
// monitorPeers periodically looks up the peers connected to the host and pings them
// up to 3 times to ensure they are still reachable. If the peer is not reachable after
// 3 attempts, the connections to the peer are closed.
func monitorPeers(ctx context.Context, logger *zap.Logger, h host.Host) {
const timeout, period, attempts = time.Minute, time.Minute, 3
// Do not allow the pings to dial new connections. Adding new peers is a separate
// process and should not be done during the ping process.
ctx = network.WithNoDial(ctx, "monitor peers")
pingOnce := func(ctx context.Context, logger *zap.Logger, id peer.ID) bool {
pingCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
select {
case <-ctx.Done():
case <-pingCtx.Done():
logger.Debug("ping timeout")
return false
case res := <-ping.Ping(pingCtx, h, id):
if res.Error != nil {
logger.Debug("ping error", zap.Error(res.Error))
return false
}
logger.Debug("ping success", zap.Duration("rtt", res.RTT))
}
return true
}
ping := func(ctx context.Context, logger *zap.Logger, wg *sync.WaitGroup, id peer.ID) {
defer wg.Done()
var conns []network.Conn
for i := 0; i < attempts; i++ {
// There are no fine grained semantics in libp2p that would allow us to 'ping via
// a specific connection'. We can only ping a peer, which will attempt to open a stream via a connection.
// As such, we save a snapshot of the connections that were potentially in use before
// the ping, and close them if the ping fails. If new connections occur between the snapshot
// and the ping, they will not be closed, and will be pinged in the next iteration.
conns = h.Network().ConnsToPeer(id)
if pingOnce(ctx, logger, id) {
return
}
}
for _, conn := range conns {
_ = conn.Close()
}
}
for {
select {
case <-ctx.Done():
return
case <-time.After(period):
// This is once again a snapshot of the peers at the time of the ping. If new peers
// are added between the snapshot and the ping, they will be pinged in the next iteration.
peers := h.Network().Peers()
connected := make([]peer.ID, 0, len(peers))
for _, p := range peers {
// The connection status may change both before and after the check. Still, it is better
// to focus on pinging only connections which are potentially connected at the moment of the check.
switch h.Network().Connectedness(p) {
case network.Connected, network.Limited:
connected = append(connected, p)
}
}
logger.Debug("pinging connected peers", zap.Int("peer_count", len(connected)))
wg := &sync.WaitGroup{}
for _, id := range connected {
logger := logger.With(zap.String("peer_id", id.String()))
wg.Add(1)
go ping(ctx, logger, wg, id)
}
wg.Wait()
logger.Debug("pinged connected peers")
}
}
}
func initDHT(
ctx context.Context,
p2pConfig *config.P2PConfig,
@ -489,19 +683,12 @@ func initDHT(
var kademliaDHT *dht.IpfsDHT
var err error
if isBootstrapPeer {
if p2pConfig.Network == 0 {
panic(
"this release is for normal peers only, if you are running a " +
"bootstrap node, please use v2.0-bootstrap",
)
} else {
kademliaDHT, err = dht.New(
ctx,
h,
dht.Mode(dht.ModeServer),
dht.BootstrapPeers(bootstrappers...),
)
}
kademliaDHT, err = dht.New(
ctx,
h,
dht.Mode(dht.ModeServer),
dht.BootstrapPeers(bootstrappers...),
)
} else {
kademliaDHT, err = dht.New(
ctx,
@ -518,9 +705,13 @@ func initDHT(
}
reconnect := func() {
wg := &sync.WaitGroup{}
defer wg.Wait()
for _, peerinfo := range bootstrappers {
peerinfo := peerinfo
wg.Add(1)
go func() {
defer wg.Done()
if peerinfo.ID == h.ID() ||
h.Network().Connectedness(peerinfo.ID) == network.Connected ||
h.Network().Connectedness(peerinfo.ID) == network.Limited {
@ -542,10 +733,21 @@ func initDHT(
reconnect()
bootstrapPeerIDs := make(map[peer.ID]struct{}, len(bootstrappers))
for _, peerinfo := range bootstrappers {
bootstrapPeerIDs[peerinfo.ID] = struct{}{}
}
go func() {
for {
time.Sleep(30 * time.Second)
if len(h.Network().Peers()) == 0 {
found := false
for _, p := range h.Network().Peers() {
if _, ok := bootstrapPeerIDs[p]; ok {
found = true
break
}
}
if !found {
reconnect()
}
}
@ -554,6 +756,19 @@ func initDHT(
return kademliaDHT
}
func (b *BlossomSub) Reconnect(peerId []byte) error {
peer := peer.ID(peerId)
info := b.h.Peerstore().PeerInfo(peer)
b.h.ConnManager().Unprotect(info.ID, "bootstrap")
time.Sleep(10 * time.Second)
if err := b.h.Connect(b.ctx, info); err != nil {
return errors.Wrap(err, "reconnect")
}
b.h.ConnManager().Protect(info.ID, "bootstrap")
return nil
}
func (b *BlossomSub) GetPeerScore(peerId []byte) int64 {
b.peerScoreMx.Lock()
score := b.peerScore[string(peerId)]
@ -567,6 +782,16 @@ func (b *BlossomSub) SetPeerScore(peerId []byte, score int64) {
b.peerScoreMx.Unlock()
}
func (b *BlossomSub) AddPeerScore(peerId []byte, scoreDelta int64) {
b.peerScoreMx.Lock()
if _, ok := b.peerScore[string(peerId)]; !ok {
b.peerScore[string(peerId)] = scoreDelta
} else {
b.peerScore[string(peerId)] = b.peerScore[string(peerId)] + scoreDelta
}
b.peerScoreMx.Unlock()
}
func (b *BlossomSub) GetBitmaskPeers() map[string][]string {
peers := map[string][]string{}
@ -803,10 +1028,12 @@ func discoverPeers(
logger *zap.Logger,
h host.Host,
routingDiscovery *routing.RoutingDiscovery,
init bool,
) {
logger.Info("initiating peer discovery")
discover := func() {
logger.Info("initiating peer discovery")
defer logger.Info("completed peer discovery")
peerChan, err := routingDiscovery.FindPeers(
ctx,
getNetworkNamespace(p2pConfig.Network),
@ -816,47 +1043,46 @@ func discoverPeers(
return
}
wg := &sync.WaitGroup{}
defer wg.Wait()
for peer := range peerChan {
if len(h.Network().Peers()) >= 6 {
break
}
peer := peer
if peer.ID == h.ID() ||
h.Network().Connectedness(peer.ID) == network.Connected ||
h.Network().Connectedness(peer.ID) == network.Limited {
continue
}
wg.Add(1)
go func() {
defer wg.Done()
if peer.ID == h.ID() ||
h.Network().Connectedness(peer.ID) == network.Connected ||
h.Network().Connectedness(peer.ID) == network.Limited {
return
}
logger.Debug("found peer", zap.String("peer_id", peer.ID.String()))
err := h.Connect(ctx, peer)
if err != nil {
logger.Debug(
"error while connecting to blossomsub peer",
zap.String("peer_id", peer.ID.String()),
zap.Error(err),
)
} else {
logger.Debug(
"connected to peer",
zap.String("peer_id", peer.ID.String()),
)
}
logger.Debug("found peer", zap.String("peer_id", peer.ID.String()))
err := h.Connect(ctx, peer)
if err != nil {
logger.Debug(
"error while connecting to blossomsub peer",
zap.String("peer_id", peer.ID.String()),
zap.Error(err),
)
} else {
logger.Debug(
"connected to peer",
zap.String("peer_id", peer.ID.String()),
)
}
}()
}
}
discover()
go func() {
for {
time.Sleep(5 * time.Second)
if len(h.Network().Peers()) < 6 {
discover()
}
}
}()
logger.Info("completed initial peer discovery")
if init {
go discover()
} else {
discover()
}
}
func mergeDefaults(p2pConfig *config.P2PConfig) blossomsub.BlossomSubParams {

View File

@ -35,4 +35,6 @@ type PubSub interface {
GetPublicKey() []byte
GetPeerScore(peerId []byte) int64
SetPeerScore(peerId []byte, score int64)
AddPeerScore(peerId []byte, scoreDelta int64)
Reconnect(peerId []byte) error
}

View File

@ -876,6 +876,77 @@ func (x *PreMidnightMintStatusRequest) GetOwner() []byte {
return nil
}
type FrameRebroadcast struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
From uint64 `protobuf:"varint,1,opt,name=from,proto3" json:"from,omitempty"`
To uint64 `protobuf:"varint,2,opt,name=to,proto3" json:"to,omitempty"`
ClockFrames []*ClockFrame `protobuf:"bytes,3,rep,name=clock_frames,json=clockFrames,proto3" json:"clock_frames,omitempty"`
Random []byte `protobuf:"bytes,4,opt,name=random,proto3" json:"random,omitempty"`
}
func (x *FrameRebroadcast) Reset() {
*x = FrameRebroadcast{}
if protoimpl.UnsafeEnabled {
mi := &file_data_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FrameRebroadcast) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FrameRebroadcast) ProtoMessage() {}
func (x *FrameRebroadcast) ProtoReflect() protoreflect.Message {
mi := &file_data_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FrameRebroadcast.ProtoReflect.Descriptor instead.
func (*FrameRebroadcast) Descriptor() ([]byte, []int) {
return file_data_proto_rawDescGZIP(), []int{13}
}
func (x *FrameRebroadcast) GetFrom() uint64 {
if x != nil {
return x.From
}
return 0
}
func (x *FrameRebroadcast) GetTo() uint64 {
if x != nil {
return x.To
}
return 0
}
func (x *FrameRebroadcast) GetClockFrames() []*ClockFrame {
if x != nil {
return x.ClockFrames
}
return nil
}
func (x *FrameRebroadcast) GetRandom() []byte {
if x != nil {
return x.Random
}
return nil
}
type ChallengeProofRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -888,7 +959,7 @@ type ChallengeProofRequest struct {
func (x *ChallengeProofRequest) Reset() {
*x = ChallengeProofRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_data_proto_msgTypes[13]
mi := &file_data_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -901,7 +972,7 @@ func (x *ChallengeProofRequest) String() string {
func (*ChallengeProofRequest) ProtoMessage() {}
func (x *ChallengeProofRequest) ProtoReflect() protoreflect.Message {
mi := &file_data_proto_msgTypes[13]
mi := &file_data_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -914,7 +985,7 @@ func (x *ChallengeProofRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChallengeProofRequest.ProtoReflect.Descriptor instead.
func (*ChallengeProofRequest) Descriptor() ([]byte, []int) {
return file_data_proto_rawDescGZIP(), []int{13}
return file_data_proto_rawDescGZIP(), []int{14}
}
func (x *ChallengeProofRequest) GetPeerId() []byte {
@ -942,7 +1013,7 @@ type ChallengeProofResponse struct {
func (x *ChallengeProofResponse) Reset() {
*x = ChallengeProofResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_data_proto_msgTypes[14]
mi := &file_data_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -955,7 +1026,7 @@ func (x *ChallengeProofResponse) String() string {
func (*ChallengeProofResponse) ProtoMessage() {}
func (x *ChallengeProofResponse) ProtoReflect() protoreflect.Message {
mi := &file_data_proto_msgTypes[14]
mi := &file_data_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -968,7 +1039,7 @@ func (x *ChallengeProofResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChallengeProofResponse.ProtoReflect.Descriptor instead.
func (*ChallengeProofResponse) Descriptor() ([]byte, []int) {
return file_data_proto_rawDescGZIP(), []int{14}
return file_data_proto_rawDescGZIP(), []int{15}
}
func (x *ChallengeProofResponse) GetOutput() []byte {
@ -1112,79 +1183,88 @@ var file_data_proto_rawDesc = []byte{
0x1c, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74,
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a,
0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6f, 0x77,
0x6e, 0x65, 0x72, 0x22, 0x77, 0x0a, 0x15, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07,
0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70,
0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x66,
0x72, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69,
0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65,
0x52, 0x0a, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x22, 0x30, 0x0a, 0x16,
0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x32, 0xff,
0x05, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x76,
0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53,
0x79, 0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x63,
0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70,
0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64,
0x53, 0x79, 0x6e, 0x63, 0x30, 0x01, 0x12, 0x9a, 0x01, 0x0a, 0x1d, 0x4e, 0x65, 0x67, 0x6f, 0x74,
0x69, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79,
0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x39, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65,
0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x1a, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x6e, 0x65, 0x72, 0x22, 0x97, 0x01, 0x0a, 0x10, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x62,
0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d,
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x0e, 0x0a, 0x02,
0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x6f, 0x12, 0x47, 0x0a, 0x0c,
0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c,
0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x46,
0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x77, 0x0a,
0x15, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12,
0x45, 0x0a, 0x0b, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e,
0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x0a, 0x63, 0x6c, 0x6f, 0x63,
0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x22, 0x30, 0x0a, 0x16, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65,
0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x32, 0xff, 0x05, 0x0a, 0x0b, 0x44, 0x61, 0x74,
0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x76, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43,
0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x72, 0x61,
0x6d, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43,
0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61,
0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x30, 0x01,
0x12, 0x9a, 0x01, 0x0a, 0x1d, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x43, 0x6f,
0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d,
0x65, 0x73, 0x12, 0x39, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74,
0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x3a, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70,
0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x76, 0x0a,
0x10, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50,
0x32, 0x50, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70,
0x65, 0x1a, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50,
0x32, 0x50, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70,
0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61,
0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69,
0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e,
0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61,
0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28,
0x01, 0x30, 0x01, 0x12, 0x76, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x32, 0x50, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x45,
0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x1a, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x32, 0x50, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x45,
0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x0c, 0x47,
0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x2e, 0x71, 0x75,
0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61,
0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61,
0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61,
0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x15, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x50,
0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x12, 0x28,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x69,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x73, 0x0a, 0x15, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e,
0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x70, 0x62, 0x2e, 0x4d, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65,
0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x4d,
0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
0x73, 0x12, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d,
0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69,
0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x18, 0x47,
0x65, 0x74, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e,
0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70,
0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e,
0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e,
0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x32, 0x8c, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x17, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x65,
0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2e,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e,
0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e,
0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8c, 0x01, 0x0a, 0x0e, 0x44,
0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a,
0x17, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65,
0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f,
0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1199,7 +1279,7 @@ func file_data_proto_rawDescGZIP() []byte {
return file_data_proto_rawDescData
}
var file_data_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
var file_data_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
var file_data_proto_goTypes = []interface{}{
(*DataPeerListAnnounce)(nil), // 0: quilibrium.node.data.pb.DataPeerListAnnounce
(*DataPeer)(nil), // 1: quilibrium.node.data.pb.DataPeer
@ -1214,48 +1294,50 @@ var file_data_proto_goTypes = []interface{}{
(*DataFrameResponse)(nil), // 10: quilibrium.node.data.pb.DataFrameResponse
(*PreMidnightMintResponse)(nil), // 11: quilibrium.node.data.pb.PreMidnightMintResponse
(*PreMidnightMintStatusRequest)(nil), // 12: quilibrium.node.data.pb.PreMidnightMintStatusRequest
(*ChallengeProofRequest)(nil), // 13: quilibrium.node.data.pb.ChallengeProofRequest
(*ChallengeProofResponse)(nil), // 14: quilibrium.node.data.pb.ChallengeProofResponse
(*ClockFrame)(nil), // 15: quilibrium.node.clock.pb.ClockFrame
(*Ed448Signature)(nil), // 16: quilibrium.node.keys.pb.Ed448Signature
(*ClockFramesPreflight)(nil), // 17: quilibrium.node.clock.pb.ClockFramesPreflight
(*ClockFramesRequest)(nil), // 18: quilibrium.node.clock.pb.ClockFramesRequest
(*P2PChannelEnvelope)(nil), // 19: quilibrium.node.channel.pb.P2PChannelEnvelope
(*MintCoinRequest)(nil), // 20: quilibrium.node.node.pb.MintCoinRequest
(*FrameRebroadcast)(nil), // 13: quilibrium.node.data.pb.FrameRebroadcast
(*ChallengeProofRequest)(nil), // 14: quilibrium.node.data.pb.ChallengeProofRequest
(*ChallengeProofResponse)(nil), // 15: quilibrium.node.data.pb.ChallengeProofResponse
(*ClockFrame)(nil), // 16: quilibrium.node.clock.pb.ClockFrame
(*Ed448Signature)(nil), // 17: quilibrium.node.keys.pb.Ed448Signature
(*ClockFramesPreflight)(nil), // 18: quilibrium.node.clock.pb.ClockFramesPreflight
(*ClockFramesRequest)(nil), // 19: quilibrium.node.clock.pb.ClockFramesRequest
(*P2PChannelEnvelope)(nil), // 20: quilibrium.node.channel.pb.P2PChannelEnvelope
(*MintCoinRequest)(nil), // 21: quilibrium.node.node.pb.MintCoinRequest
}
var file_data_proto_depIdxs = []int32{
1, // 0: quilibrium.node.data.pb.DataPeerListAnnounce.peer_list:type_name -> quilibrium.node.data.pb.DataPeer
15, // 1: quilibrium.node.data.pb.DataCompressedSync.truncated_clock_frames:type_name -> quilibrium.node.clock.pb.ClockFrame
16, // 1: quilibrium.node.data.pb.DataCompressedSync.truncated_clock_frames:type_name -> quilibrium.node.clock.pb.ClockFrame
6, // 2: quilibrium.node.data.pb.DataCompressedSync.proofs:type_name -> quilibrium.node.data.pb.InclusionProofsMap
7, // 3: quilibrium.node.data.pb.DataCompressedSync.segments:type_name -> quilibrium.node.data.pb.InclusionSegmentsMap
16, // 4: quilibrium.node.data.pb.SyncRequestAuthentication.response:type_name -> quilibrium.node.keys.pb.Ed448Signature
17, // 5: quilibrium.node.data.pb.DataCompressedSyncRequestMessage.preflight:type_name -> quilibrium.node.clock.pb.ClockFramesPreflight
18, // 6: quilibrium.node.data.pb.DataCompressedSyncRequestMessage.request:type_name -> quilibrium.node.clock.pb.ClockFramesRequest
17, // 4: quilibrium.node.data.pb.SyncRequestAuthentication.response:type_name -> quilibrium.node.keys.pb.Ed448Signature
18, // 5: quilibrium.node.data.pb.DataCompressedSyncRequestMessage.preflight:type_name -> quilibrium.node.clock.pb.ClockFramesPreflight
19, // 6: quilibrium.node.data.pb.DataCompressedSyncRequestMessage.request:type_name -> quilibrium.node.clock.pb.ClockFramesRequest
3, // 7: quilibrium.node.data.pb.DataCompressedSyncRequestMessage.authentication:type_name -> quilibrium.node.data.pb.SyncRequestAuthentication
17, // 8: quilibrium.node.data.pb.DataCompressedSyncResponseMessage.preflight:type_name -> quilibrium.node.clock.pb.ClockFramesPreflight
18, // 8: quilibrium.node.data.pb.DataCompressedSyncResponseMessage.preflight:type_name -> quilibrium.node.clock.pb.ClockFramesPreflight
2, // 9: quilibrium.node.data.pb.DataCompressedSyncResponseMessage.response:type_name -> quilibrium.node.data.pb.DataCompressedSync
8, // 10: quilibrium.node.data.pb.InclusionProofsMap.commitments:type_name -> quilibrium.node.data.pb.InclusionCommitmentsMap
15, // 11: quilibrium.node.data.pb.DataFrameResponse.clock_frame:type_name -> quilibrium.node.clock.pb.ClockFrame
15, // 12: quilibrium.node.data.pb.ChallengeProofRequest.clock_frame:type_name -> quilibrium.node.clock.pb.ClockFrame
18, // 13: quilibrium.node.data.pb.DataService.GetCompressedSyncFrames:input_type -> quilibrium.node.clock.pb.ClockFramesRequest
4, // 14: quilibrium.node.data.pb.DataService.NegotiateCompressedSyncFrames:input_type -> quilibrium.node.data.pb.DataCompressedSyncRequestMessage
19, // 15: quilibrium.node.data.pb.DataService.GetPublicChannel:input_type -> quilibrium.node.channel.pb.P2PChannelEnvelope
9, // 16: quilibrium.node.data.pb.DataService.GetDataFrame:input_type -> quilibrium.node.data.pb.GetDataFrameRequest
20, // 17: quilibrium.node.data.pb.DataService.HandlePreMidnightMint:input_type -> quilibrium.node.node.pb.MintCoinRequest
12, // 18: quilibrium.node.data.pb.DataService.GetPreMidnightMintStatus:input_type -> quilibrium.node.data.pb.PreMidnightMintStatusRequest
13, // 19: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:input_type -> quilibrium.node.data.pb.ChallengeProofRequest
2, // 20: quilibrium.node.data.pb.DataService.GetCompressedSyncFrames:output_type -> quilibrium.node.data.pb.DataCompressedSync
5, // 21: quilibrium.node.data.pb.DataService.NegotiateCompressedSyncFrames:output_type -> quilibrium.node.data.pb.DataCompressedSyncResponseMessage
19, // 22: quilibrium.node.data.pb.DataService.GetPublicChannel:output_type -> quilibrium.node.channel.pb.P2PChannelEnvelope
10, // 23: quilibrium.node.data.pb.DataService.GetDataFrame:output_type -> quilibrium.node.data.pb.DataFrameResponse
11, // 24: quilibrium.node.data.pb.DataService.HandlePreMidnightMint:output_type -> quilibrium.node.data.pb.PreMidnightMintResponse
11, // 25: quilibrium.node.data.pb.DataService.GetPreMidnightMintStatus:output_type -> quilibrium.node.data.pb.PreMidnightMintResponse
14, // 26: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:output_type -> quilibrium.node.data.pb.ChallengeProofResponse
20, // [20:27] is the sub-list for method output_type
13, // [13:20] is the sub-list for method input_type
13, // [13:13] is the sub-list for extension type_name
13, // [13:13] is the sub-list for extension extendee
0, // [0:13] is the sub-list for field type_name
16, // 11: quilibrium.node.data.pb.DataFrameResponse.clock_frame:type_name -> quilibrium.node.clock.pb.ClockFrame
16, // 12: quilibrium.node.data.pb.FrameRebroadcast.clock_frames:type_name -> quilibrium.node.clock.pb.ClockFrame
16, // 13: quilibrium.node.data.pb.ChallengeProofRequest.clock_frame:type_name -> quilibrium.node.clock.pb.ClockFrame
19, // 14: quilibrium.node.data.pb.DataService.GetCompressedSyncFrames:input_type -> quilibrium.node.clock.pb.ClockFramesRequest
4, // 15: quilibrium.node.data.pb.DataService.NegotiateCompressedSyncFrames:input_type -> quilibrium.node.data.pb.DataCompressedSyncRequestMessage
20, // 16: quilibrium.node.data.pb.DataService.GetPublicChannel:input_type -> quilibrium.node.channel.pb.P2PChannelEnvelope
9, // 17: quilibrium.node.data.pb.DataService.GetDataFrame:input_type -> quilibrium.node.data.pb.GetDataFrameRequest
21, // 18: quilibrium.node.data.pb.DataService.HandlePreMidnightMint:input_type -> quilibrium.node.node.pb.MintCoinRequest
12, // 19: quilibrium.node.data.pb.DataService.GetPreMidnightMintStatus:input_type -> quilibrium.node.data.pb.PreMidnightMintStatusRequest
14, // 20: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:input_type -> quilibrium.node.data.pb.ChallengeProofRequest
2, // 21: quilibrium.node.data.pb.DataService.GetCompressedSyncFrames:output_type -> quilibrium.node.data.pb.DataCompressedSync
5, // 22: quilibrium.node.data.pb.DataService.NegotiateCompressedSyncFrames:output_type -> quilibrium.node.data.pb.DataCompressedSyncResponseMessage
20, // 23: quilibrium.node.data.pb.DataService.GetPublicChannel:output_type -> quilibrium.node.channel.pb.P2PChannelEnvelope
10, // 24: quilibrium.node.data.pb.DataService.GetDataFrame:output_type -> quilibrium.node.data.pb.DataFrameResponse
11, // 25: quilibrium.node.data.pb.DataService.HandlePreMidnightMint:output_type -> quilibrium.node.data.pb.PreMidnightMintResponse
11, // 26: quilibrium.node.data.pb.DataService.GetPreMidnightMintStatus:output_type -> quilibrium.node.data.pb.PreMidnightMintResponse
15, // 27: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:output_type -> quilibrium.node.data.pb.ChallengeProofResponse
21, // [21:28] is the sub-list for method output_type
14, // [14:21] is the sub-list for method input_type
14, // [14:14] is the sub-list for extension type_name
14, // [14:14] is the sub-list for extension extendee
0, // [0:14] is the sub-list for field type_name
}
func init() { file_data_proto_init() }
@ -1425,7 +1507,7 @@ func file_data_proto_init() {
}
}
file_data_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChallengeProofRequest); i {
switch v := v.(*FrameRebroadcast); i {
case 0:
return &v.state
case 1:
@ -1437,6 +1519,18 @@ func file_data_proto_init() {
}
}
file_data_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChallengeProofRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_data_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChallengeProofResponse); i {
case 0:
return &v.state
@ -1464,7 +1558,7 @@ func file_data_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_data_proto_rawDesc,
NumEnums: 0,
NumMessages: 15,
NumMessages: 16,
NumExtensions: 0,
NumServices: 2,
},

View File

@ -88,6 +88,13 @@ message PreMidnightMintStatusRequest {
bytes owner = 1;
}
message FrameRebroadcast {
uint64 from = 1;
uint64 to = 2;
repeated quilibrium.node.clock.pb.ClockFrame clock_frames = 3;
bytes random = 4;
}
service DataService {
rpc GetCompressedSyncFrames (quilibrium.node.clock.pb.ClockFramesRequest) returns (stream DataCompressedSync);
rpc NegotiateCompressedSyncFrames (stream DataCompressedSyncRequestMessage) returns (stream DataCompressedSyncResponseMessage);

View File

@ -22,6 +22,7 @@ const (
InclusionProofsMapType = DataPrefix + "InclusionProofsMap"
InclusionSegmentsMapType = DataPrefix + "InclusionSegmentsMap"
InclusionCommitmentsMapType = DataPrefix + "InclusionCommitmentsMap"
FrameRebroadcastType = DataPrefix + "FrameRebroadcast"
ApplicationType = AppPrefix + "Application"
ExecutionContextType = AppPrefix + "ExecutionContext"
MessageType = AppPrefix + "Message"

View File

@ -161,9 +161,14 @@ func (r *RPCServer) GetNodeInfo(
}
peerScore := r.pubSub.GetPeerScore(r.pubSub.GetPeerID())
head := r.executionEngines[0].GetFrame()
frame := uint64(0)
if head != nil {
frame = head.FrameNumber
}
return &protobufs.NodeInfoResponse{
PeerId: peerID.String(),
MaxFrame: r.masterClock.GetFrame().GetFrameNumber(),
MaxFrame: frame,
PeerScore: uint64(peerScore),
Version: append(
append([]byte{}, config.GetVersion()...), config.GetPatchNumber(),

View File

@ -63,6 +63,10 @@ type ClockStore interface {
parentSelector []byte,
truncate bool,
) (*protobufs.ClockFrame, error)
GetStagedDataClockFramesForFrameNumber(
filter []byte,
frameNumber uint64,
) ([]*protobufs.ClockFrame, error)
SetLatestDataClockFrameNumber(
filter []byte,
frameNumber uint64,
@ -816,6 +820,45 @@ func (p *PebbleClockStore) GetStagedDataClockFrame(
return parent, nil
}
func (p *PebbleClockStore) GetStagedDataClockFramesForFrameNumber(
filter []byte,
frameNumber uint64,
) ([]*protobufs.ClockFrame, error) {
iter, err := p.db.NewIter(
clockDataParentIndexKey(filter, frameNumber, bytes.Repeat([]byte{0x00}, 32)),
clockDataParentIndexKey(filter, frameNumber, bytes.Repeat([]byte{0xff}, 32)),
)
if err != nil {
if errors.Is(err, pebble.ErrNotFound) {
return nil, errors.Wrap(ErrNotFound, "get staged data clock frames")
}
return nil, errors.Wrap(err, "get staged data clock frames")
}
frames := []*protobufs.ClockFrame{}
for iter.First(); iter.Valid(); iter.Next() {
data := iter.Value()
frame := &protobufs.ClockFrame{}
if err := proto.Unmarshal(data, frame); err != nil {
return nil, errors.Wrap(err, "get staged data clock frames")
}
if err := p.fillAggregateProofs(frame, false); err != nil {
return nil, errors.Wrap(
errors.Wrap(err, ErrInvalidData.Error()),
"get staged data clock frames",
)
}
frames = append(frames, frame)
}
iter.Close()
return frames, nil
}
// StageDataClockFrame implements ClockStore.
func (p *PebbleClockStore) StageDataClockFrame(
selector []byte,