Merge branch 'develop' into v2.0.4

Cassandra Heart 2024-11-19 00:06:01 -06:00
commit 0b54808fe8
No known key found for this signature in database
GPG Key ID: 6352152859385958
55 changed files with 2978 additions and 1858 deletions

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math/big"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/spf13/cobra"
@ -13,7 +14,20 @@ import (
var coinsCmd = &cobra.Command{
Use: "coins",
Short: "Lists all coins under control of the managing account",
Long: `Lists all coins under control of the managing account.
coins [metadata]
When "metadata" is added to the command, includes timestamps and frame numbers.
`,
Run: func(cmd *cobra.Command, args []string) {
includeMetadata := false
for _, arg := range args {
if arg == "metadata" {
includeMetadata = true
}
}
conn, err := GetGRPCClient()
if err != nil {
panic(err)
@ -41,7 +55,8 @@ var coinsCmd = &cobra.Command{
resp, err := client.GetTokensByAccount(
context.Background(),
&protobufs.GetTokensByAccountRequest{
Address: addrBytes,
IncludeMetadata: includeMetadata,
},
)
if err != nil {
@ -61,7 +76,8 @@ var coinsCmd = &cobra.Command{
resp2, err := client.GetTokensByAccount(
context.Background(),
&protobufs.GetTokensByAccountRequest{
Address: altAddrBytes,
IncludeMetadata: includeMetadata,
},
)
if err != nil {
@ -76,19 +92,45 @@ var coinsCmd = &cobra.Command{
amount := new(big.Int).SetBytes(coin.Amount)
conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
r := new(big.Rat).SetFrac(amount, conversionFactor)
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp.Addresses[i]),
)
if !includeMetadata || len(resp.Timestamps) == 0 {
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp.Addresses[i]),
)
} else {
frame := resp.FrameNumbers[i]
timestamp := resp.Timestamps[i]
t := time.UnixMilli(timestamp)
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp.Addresses[i]),
fmt.Sprintf("Frame %d, Timestamp %s", frame, t.Format(time.RFC3339)),
)
}
}
for i, coin := range resp2.Coins {
amount := new(big.Int).SetBytes(coin.Amount)
conversionFactor, _ := new(big.Int).SetString("1DCD65000", 16)
r := new(big.Rat).SetFrac(amount, conversionFactor)
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp.Addresses[i]),
)
if !includeMetadata || len(resp2.Timestamps) == 0 {
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp2.Addresses[i]),
)
} else {
frame := resp2.FrameNumbers[i]
timestamp := resp2.Timestamps[i]
t := time.UnixMilli(timestamp)
fmt.Println(
r.FloatString(12),
fmt.Sprintf("QUIL (Coin 0x%x)", resp2.Addresses[i]),
fmt.Sprintf("Frame %d, Timestamp %s", frame, t.Format(time.RFC3339)),
)
}
}
},
}
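For illustration, a hypothetical invocation of the extended command (the "token" parent command and all output values are assumptions, not taken from this diff):

$ qclient token coins metadata
25.000000000000 QUIL (Coin 0x0f45ab...) Frame 45939, Timestamp 2024-11-19T06:06:01Z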

View File

@ -2,7 +2,9 @@ package cmd
import (
"bytes"
"fmt"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/crypto"
@ -29,7 +31,7 @@ var proverPauseCmd = &cobra.Command{
pubsub := p2p.NewBlossomSub(NodeConfig.P2P, logger)
intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
pubsub.Subscribe(
intrinsicFilter,
append([]byte{0x00}, intrinsicFilter...),
func(message *pb.Message) error { return nil },
)
key, err := GetPrivKeyFromConfig(NodeConfig)
@ -52,10 +54,28 @@ var proverPauseCmd = &cobra.Command{
panic(err)
}
loop:
for {
peers := pubsub.GetBitmaskPeers()
if len(peers) == 0 {
fmt.Println("Waiting for peer list to form before broadcasting pause...")
time.Sleep(5 * time.Second)
continue loop
}
for _, set := range peers {
if len(set) < 3 {
fmt.Println("Waiting for more peers before broadcasting pause...")
time.Sleep(5 * time.Second)
continue loop
}
break loop
}
}
err = publishMessage(
key,
pubsub,
intrinsicFilter,
append([]byte{0x00}, intrinsicFilter...),
&protobufs.AnnounceProverPause{
Filter: filter,
PublicKeySignatureEd448: &protobufs.Ed448Signature{

View File

@ -26,6 +26,7 @@ var NodeConfig *config.Config
var simulateFail bool
var LightNode bool = false
var DryRun bool = false
var publicRPC bool = false
var rootCmd = &cobra.Command{
Use: "qclient",
@ -109,7 +110,7 @@ var rootCmd = &cobra.Command{
os.Exit(1)
}
if NodeConfig.ListenGRPCMultiaddr == "" {
if publicRPC {
fmt.Println("gRPC not enabled, using light node")
LightNode = true
}
@ -184,4 +185,10 @@ func init() {
signatureCheckDefault(),
"bypass signature check (not recommended for binaries) (default true or value of QUILIBRIUM_SIGNATURE_CHECK env var)",
)
rootCmd.PersistentFlags().BoolVar(
&publicRPC,
"public-rpc",
false,
"uses the public RPC",
)
}
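A sketch of how the new flag interacts with the light-node fallback above (subcommand illustrative):

$ qclient --public-rpc token coins
gRPC not enabled, using light node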

View File

@ -29,7 +29,7 @@ require (
github.com/multiformats/go-multiaddr v0.12.4
github.com/stretchr/testify v1.9.0
golang.org/x/crypto v0.24.0
google.golang.org/grpc v1.58.2
google.golang.org/grpc v1.63.2
source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/node v0.0.0-00010101000000-000000000000
)
@ -62,12 +62,13 @@ require (
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
@ -167,8 +168,8 @@ require (
golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
gonum.org/v1/gonum v0.13.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.2.1 // indirect

View File

@ -128,8 +128,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -146,9 +146,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -159,7 +158,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -174,8 +172,8 @@ github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0Z
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -184,6 +182,10 @@ github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORR
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk=
@ -734,12 +736,12 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@ -748,8 +750,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -759,8 +761,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -30,8 +30,8 @@ const (
// Defines the default BlossomSub parameters.
var (
BlossomSubD = 6
BlossomSubDlo = 5
BlossomSubD = 8
BlossomSubDlo = 6
BlossomSubDhi = 12
BlossomSubDscore = 4
BlossomSubDout = 2
@ -41,7 +41,7 @@ var (
BlossomSubGossipRetransmission = 3
BlossomSubBitmaskWidth = 256
BlossomSubHeartbeatInitialDelay = 100 * time.Millisecond
BlossomSubHeartbeatInterval = 1 * time.Second
BlossomSubHeartbeatInterval = 700 * time.Millisecond
BlossomSubFanoutTTL = 60 * time.Second
BlossomSubPrunePeers = 16
BlossomSubPruneBackoff = time.Minute
@ -633,6 +633,19 @@ loop:
func (bs *BlossomSubRouter) RemovePeer(p peer.ID) {
log.Debugf("PEERDOWN: Remove disconnected peer %s", p)
masks := make([][]byte, 0)
bs.meshMx.Lock()
for bitmask, peers := range bs.mesh {
if _, ok := peers[p]; !ok {
continue
}
masks = append(masks, []byte(bitmask))
}
bs.meshMx.Unlock()
for _, bitmask := range masks {
log.Debugf("PEERDOWN: Pruning peer %s from bitmask %s", p, bitmask)
bs.tracer.Prune(p, bitmask)
}
bs.tracer.RemovePeer(p)
delete(bs.peers, p)
bs.meshMx.Lock()

View File

@ -11,9 +11,12 @@ import (
)
const (
defaultValidateQueueSize = 32
defaultValidateConcurrency = 1024
defaultValidateThrottle = 8192
// DefaultValidateQueueSize is the default size of the validation queue.
DefaultValidateQueueSize = 16384
// DefaultValidateConcurrency is the default number of concurrent instances of a validator per bitmask.
DefaultValidateConcurrency = 1024
// DefaultValidateThrottle is the default number of concurrent instances of all validators.
DefaultValidateThrottle = 8192
)
// ValidationError is an error that may be signalled from message publication when the message
@ -121,8 +124,8 @@ type rmValReq struct {
func newValidation() *validation {
return &validation{
bitmaskVals: make(map[string]*validatorImpl),
validateQ: make(chan *validateReq, defaultValidateQueueSize),
validateThrottle: make(chan struct{}, defaultValidateThrottle),
validateQ: make(chan *validateReq, DefaultValidateQueueSize),
validateThrottle: make(chan struct{}, DefaultValidateThrottle),
validateWorkers: runtime.NumCPU(),
}
}
@ -196,7 +199,7 @@ func (v *validation) makeValidator(req *addValReq) (*validatorImpl, error) {
bitmask: req.bitmask,
validate: validator,
validateTimeout: 0,
validateThrottle: make(chan struct{}, defaultValidateConcurrency),
validateThrottle: make(chan struct{}, DefaultValidateConcurrency),
validateInline: req.inline,
}
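A minimal sketch of why these constants are now exported: node-level config (see the validateQueueSize and validateWorkers fields added to P2PConfig later in this commit) can size the validation pipeline and fall back to the library defaults. The helper below is an assumption for illustration, not blossomsub API, and assumes the package is imported as blossomsub.

func validationQueueSize(configured int) int {
	// Fall back to the exported library default when config leaves it unset.
	if configured <= 0 {
		return blossomsub.DefaultValidateQueueSize
	}
	return configured
}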

View File

@ -22,6 +22,7 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
"source.quilibrium.com/quilibrium/monorepo/node/config"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
@ -624,7 +625,8 @@ func ConnectToNode(nodeConfig *config.Config) (*grpc.ClientConn, error) {
}
}
return grpc.Dial(
return qgrpc.DialContext(
context.Background(),
addr,
grpc.WithTransportCredentials(
insecure.NewCredentials(),

View File

@ -1,5 +1,7 @@
package config
import "time"
type EngineConfig struct {
ProvingKeyId string `yaml:"provingKeyId"`
Filter string `yaml:"filter"`
@ -24,6 +26,8 @@ type EngineConfig struct {
// Automatically merges coins after minting once a sufficient number has been
// accrued
AutoMergeCoins bool `yaml:"autoMergeCoins"`
// Maximum wait time for a frame to be downloaded from a peer.
SyncTimeout time.Duration `yaml:"syncTimeout"`
// Values used only for testing; do not override these in production, or your
// node will get kicked out
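For illustration, setting the new field in code (the YAML key is syncTimeout per the struct tag; zero falls back to the 2-second defaultSyncTimeout used by the sync loop later in this commit):

cfg.Engine.SyncTimeout = 5 * time.Second // per-request deadline when pulling frames from a peer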

View File

@ -35,10 +35,18 @@ type P2PConfig struct {
ListenMultiaddr string `yaml:"listenMultiaddr"`
PeerPrivKey string `yaml:"peerPrivKey"`
TraceLogFile string `yaml:"traceLogFile"`
MinPeers int `yaml:"minPeers"`
Network uint8 `yaml:"network"`
LowWatermarkConnections uint `yaml:"lowWatermarkConnections"`
HighWatermarkConnections uint `yaml:"highWatermarkConnections"`
LowWatermarkConnections int `yaml:"lowWatermarkConnections"`
HighWatermarkConnections int `yaml:"highWatermarkConnections"`
DirectPeers []string `yaml:"directPeers"`
GrpcServerRateLimit int `yaml:"grpcServerRateLimit"`
MinBootstrapPeers int `yaml:"minBootstrapPeers"`
BootstrapParallelism int `yaml:"bootstrapParallelism"`
DiscoveryParallelism int `yaml:"discoveryParallelism"`
DiscoveryPeerLookupLimit int `yaml:"discoveryPeerLookupLimit"`
PingTimeout time.Duration `yaml:"pingTimeout"`
PingPeriod time.Duration `yaml:"pingPeriod"`
PingAttempts int `yaml:"pingAttempts"`
ValidateQueueSize int `yaml:"validateQueueSize"`
ValidateWorkers int `yaml:"validateWorkers"`
}
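For illustration, a sketch of populating the new fields (all values hypothetical; field names are taken from the struct above, and zero values are assumed to fall back to internal defaults):

cfg := config.P2PConfig{
	PingTimeout:       5 * time.Second,
	PingPeriod:        30 * time.Second,
	PingAttempts:      3,
	ValidateQueueSize: 16384, // mirrors DefaultValidateQueueSize in blossomsub
	ValidateWorkers:   4,
}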

View File

@ -14,7 +14,7 @@ func GetMinimumVersion() []byte {
}
func GetVersion() []byte {
return []byte{0x02, 0x00, 0x03}
return []byte{0x02, 0x00, 0x04}
}
func GetVersionString() string {
@ -36,9 +36,9 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x04
return 0x00
}
func GetRCNumber() byte {
return 0x0a
return 0x00
}
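The three bytes returned by GetVersion encode major.minor.patch, so the node now reports 2.0.4 with the patch and RC counters zeroed for the new release line. A sketch of the mapping (assuming these functions live in the config package, as the file layout suggests):

v := config.GetVersion()                    // []byte{0x02, 0x00, 0x04}
fmt.Printf("v%d.%d.%d\n", v[0], v[1], v[2]) // v2.0.4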

View File

@ -20,6 +20,8 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
const defaultSyncTimeout = 2 * time.Second
func (e *DataClockConsensusEngine) collect(
enqueuedFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
@ -280,73 +282,70 @@ func (e *DataClockConsensusEngine) sync(
zap.Uint64("current_frame", latest.FrameNumber),
zap.Uint64("max_frame", maxFrame),
)
var cooperative bool = true
defer func() {
if cooperative {
return
}
e.peerMapMx.Lock()
defer e.peerMapMx.Unlock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
}()
cc, err := e.pubSub.GetDirectChannel(peerId, "sync")
if err != nil {
e.logger.Debug(
"could not establish direct channel",
zap.Error(err),
)
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
cooperative = false
return latest, errors.Wrap(err, "sync")
}
defer func() {
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
}()
client := protobufs.NewDataServiceClient(cc)
syncTimeout := e.config.Engine.SyncTimeout
if syncTimeout == 0 {
syncTimeout = defaultSyncTimeout
}
for e.GetState() < consensus.EngineStateStopping {
ctx, cancel := context.WithTimeout(e.ctx, syncTimeout)
response, err := client.GetDataFrame(
context.TODO(),
ctx,
&protobufs.GetDataFrameRequest{
FrameNumber: latest.FrameNumber + 1,
},
grpc.MaxCallRecvMsgSize(600*1024*1024),
)
cancel()
if err != nil {
e.logger.Debug(
"could not get frame",
zap.Error(err),
)
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
cooperative = false
return latest, errors.Wrap(err, "sync")
}
if response == nil {
e.logger.Debug("received no response from peer")
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
return latest, nil
}
if response.ClockFrame == nil ||
response.ClockFrame.FrameNumber != latest.FrameNumber+1 ||
response.ClockFrame.Timestamp < latest.Timestamp {
e.logger.Debug("received invalid response from peer")
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
cooperative = false
return latest, nil
}
e.logger.Info(
@ -357,13 +356,7 @@ func (e *DataClockConsensusEngine) sync(
if !e.IsInProverTrie(
response.ClockFrame.GetPublicKeySignatureEd448().PublicKey.KeyValue,
) {
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
cooperative = false
}
if err := e.frameProver.VerifyDataClockFrame(
response.ClockFrame,
@ -373,11 +366,8 @@ func (e *DataClockConsensusEngine) sync(
e.dataTimeReel.Insert(response.ClockFrame, true)
latest = response.ClockFrame
if latest.FrameNumber >= maxFrame {
break
return latest, nil
}
}
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
return latest, nil
}

View File

@ -28,6 +28,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/internal/cas"
"source.quilibrium.com/quilibrium/monorepo/node/internal/frametime"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -61,6 +62,10 @@ type ChannelServer = protobufs.DataService_GetPublicChannelServer
type DataClockConsensusEngine struct {
protobufs.UnimplementedDataServiceServer
ctx context.Context
cancel context.CancelFunc
lastProven uint64
difficulty uint32
config *config.Config
@ -126,6 +131,7 @@ type DataClockConsensusEngine struct {
previousFrameProven *protobufs.ClockFrame
previousTree *mt.MerkleTree
clientReconnectTest int
requestSyncCh chan *protobufs.ClockFrame
}
var _ consensus.DataConsensusEngine = (*DataClockConsensusEngine)(nil)
@ -215,7 +221,10 @@ func NewDataClockConsensusEngine(
rateLimit = 10
}
ctx, cancel := context.WithCancel(context.Background())
e := &DataClockConsensusEngine{
ctx: ctx,
cancel: cancel,
difficulty: difficulty,
logger: logger,
state: consensus.EngineStateStopped,
@ -256,6 +265,7 @@ func NewDataClockConsensusEngine(
rateLimit,
time.Minute,
),
requestSyncCh: make(chan *protobufs.ClockFrame, 1),
}
logger.Info("constructing consensus engine")
@ -305,14 +315,14 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
go e.runInfoMessageHandler()
e.logger.Info("subscribing to pubsub messages")
e.pubSub.RegisterValidator(e.frameFilter, e.validateFrameMessage)
e.pubSub.RegisterValidator(e.txFilter, e.validateTxMessage)
e.pubSub.RegisterValidator(e.infoFilter, e.validateInfoMessage)
e.pubSub.RegisterValidator(e.frameFilter, e.validateFrameMessage, true)
e.pubSub.RegisterValidator(e.txFilter, e.validateTxMessage, true)
e.pubSub.RegisterValidator(e.infoFilter, e.validateInfoMessage, true)
e.pubSub.Subscribe(e.frameFilter, e.handleFrameMessage)
e.pubSub.Subscribe(e.txFilter, e.handleTxMessage)
e.pubSub.Subscribe(e.infoFilter, e.handleInfoMessage)
go func() {
server := grpc.NewServer(
server := qgrpc.NewServer(
grpc.MaxSendMsgSize(20*1024*1024),
grpc.MaxRecvMsgSize(20*1024*1024),
)
@ -328,7 +338,7 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
go func() {
if e.dataTimeReel.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
server := grpc.NewServer(
server := qgrpc.NewServer(
grpc.MaxSendMsgSize(1*1024*1024),
grpc.MaxRecvMsgSize(1*1024*1024),
)
@ -479,6 +489,9 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
}()
go e.runLoop()
go e.runSync()
go e.runFramePruning()
go func() {
time.Sleep(30 * time.Second)
e.logger.Info("checking for snapshots to play forward")
@ -558,11 +571,13 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
go func() {
resp, err :=
client.client.CalculateChallengeProof(
context.Background(),
e.ctx,
&protobufs.ChallengeProofRequest{
PeerId: e.pubSub.GetPeerID(),
Core: uint32(i),
ClockFrame: frame,
PeerId: e.pubSub.GetPeerID(),
Core: uint32(i),
Output: frame.Output,
FrameNumber: frame.FrameNumber,
Difficulty: frame.Difficulty,
},
)
if err != nil {
@ -594,6 +609,7 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
func (e *DataClockConsensusEngine) Stop(force bool) <-chan error {
e.logger.Info("stopping ceremony consensus engine")
e.cancel()
e.stateMx.Lock()
e.state = consensus.EngineStateStopping
e.stateMx.Unlock()
@ -620,6 +636,7 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error {
},
},
},
Timestamp: time.Now().UnixMilli(),
})
wg := sync.WaitGroup{}
@ -765,9 +782,9 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromListAndIndex(
return nil, errors.Wrap(err, "create parallel data client")
}
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(e.ctx, 1*time.Second)
defer cancel()
conn, err := grpc.DialContext(
conn, err := qgrpc.DialContext(
ctx,
addr,
grpc.WithTransportCredentials(
@ -828,9 +845,9 @@ func (
return nil, errors.Wrap(err, "create parallel data client")
}
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(e.ctx, 1*time.Second)
defer cancel()
conn, err := grpc.DialContext(
conn, err := qgrpc.DialContext(
ctx,
addr,
grpc.WithTransportCredentials(
@ -880,9 +897,9 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() (
continue
}
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(e.ctx, 1*time.Second)
defer cancel()
conn, err := grpc.DialContext(
conn, err := qgrpc.DialContext(
ctx,
addr,
grpc.WithTransportCredentials(
@ -943,9 +960,9 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
e.logger.Error("could not get dial args", zap.Error(err))
continue
}
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(e.ctx, 1*time.Second)
defer cancel()
conn, err := grpc.DialContext(
conn, err := qgrpc.DialContext(
ctx,
addr,
grpc.WithTransportCredentials(
@ -971,3 +988,14 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
)
return clients, nil
}
func (e *DataClockConsensusEngine) GetWorkerCount() uint32 {
count := uint32(0)
for _, client := range e.clients {
if client != nil {
count++
}
}
return count
}

View File

@ -0,0 +1,13 @@
package data
import "go.uber.org/zap"
func (e *DataClockConsensusEngine) pruneFrames(maxFrame uint64) error {
e.logger.Info("pruning frames", zap.Uint64("max_frame_to_prune", maxFrame))
err := e.clockStore.DeleteDataClockFrameRange(e.filter, 1, maxFrame)
if err != nil {
e.logger.Error("failed to prune frames", zap.Error(err))
return err
}
return nil
}

View File

@ -8,6 +8,7 @@ import (
"github.com/iden3/go-iden3-crypto/poseidon"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
"source.quilibrium.com/quilibrium/monorepo/node/internal/cas"
"source.quilibrium.com/quilibrium/monorepo/node/internal/frametime"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -41,6 +42,75 @@ func (
return frameProverTries
}
func (e *DataClockConsensusEngine) runFramePruning() {
// A full prover should _never_ do this
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) ||
e.config.Engine.MaxFrames == -1 || e.config.Engine.FullProver {
e.logger.Info("frame pruning not enabled")
return
}
if e.config.Engine.MaxFrames < 1000 {
e.logger.Warn(
"max frames for pruning too low, pruning disabled",
zap.Int64("max_frames", e.config.Engine.MaxFrames),
)
return
}
for {
select {
case <-e.ctx.Done():
return
case <-time.After(1 * time.Hour):
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if head.FrameNumber < uint64(e.config.Engine.MaxFrames)+1 ||
head.FrameNumber <= application.PROOF_FRAME_SENIORITY_REPAIR+1 {
continue
}
if err := e.pruneFrames(
head.FrameNumber - uint64(e.config.Engine.MaxFrames),
); err != nil {
e.logger.Error("could not prune", zap.Error(err))
}
}
}
}
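A sketch of opting in to the new pruning loop (value hypothetical; per the checks above, -1 or FullProver disables pruning, values below 1000 are rejected with a warning, and a prover in the first trie never prunes):

cfg.Engine.MaxFrames = 10000 // keep roughly the most recent 10000 frames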
func (e *DataClockConsensusEngine) runSync() {
// small optimization, beacon should never collect for now:
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
return
}
for {
select {
case <-e.ctx.Done():
return
case enqueuedFrame := <-e.requestSyncCh:
if _, err := e.collect(enqueuedFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
}
case <-time.After(20 * time.Second):
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
continue
}
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if _, err := e.collect(head); err != nil {
e.logger.Error("could not collect", zap.Error(err))
}
}
}
}
func (e *DataClockConsensusEngine) runLoop() {
dataFrameCh := e.dataTimeReel.NewFrameCh()
runOnce := true
@ -58,9 +128,22 @@ func (e *DataClockConsensusEngine) runLoop() {
panic(err)
}
select {
case dataFrame := <-dataFrameCh:
if runOnce {
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
latestFrame = e.processFrame(latestFrame, dataFrame)
}
runOnce = false
}
select {
case <-e.ctx.Done():
return
case dataFrame := <-dataFrameCh:
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
if err = e.publishProof(dataFrame); err != nil {
e.logger.Error("could not publish", zap.Error(err))
@ -71,21 +154,6 @@ func (e *DataClockConsensusEngine) runLoop() {
e.stateMx.Unlock()
}
}
latestFrame = e.processFrame(latestFrame, dataFrame)
case <-time.After(20 * time.Second):
if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) && !runOnce {
continue
}
if runOnce {
runOnce = false
}
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
latestFrame = e.processFrame(latestFrame, dataFrame)
}
}
@ -103,8 +171,9 @@ func (e *DataClockConsensusEngine) processFrame(
)
var err error
if !e.GetFrameProverTries()[0].Contains(e.provingKeyBytes) {
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
select {
case e.requestSyncCh <- dataFrame:
default:
}
}
@ -267,6 +336,7 @@ func (e *DataClockConsensusEngine) processFrame(
},
},
},
Timestamp: time.Now().UnixMilli(),
})
if e.config.Engine.AutoMergeCoins {
@ -307,6 +377,7 @@ func (e *DataClockConsensusEngine) processFrame(
},
},
},
Timestamp: time.Now().UnixMilli(),
})
}
}

View File

@ -1,10 +1,10 @@
package data
import (
"encoding/binary"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@ -15,28 +15,23 @@ import (
func (e *DataClockConsensusEngine) validateFrameMessage(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
msg := &protobufs.Message{}
if err := proto.Unmarshal(message.Data, msg); err != nil {
e.logger.Debug("could not unmarshal message", zap.Error(err))
return p2p.ValidationResultReject
}
a := &anypb.Any{}
if err := proto.Unmarshal(msg.Payload, a); err != nil {
e.logger.Debug("could not unmarshal payload", zap.Error(err))
return p2p.ValidationResultReject
}
switch a.TypeUrl {
case protobufs.ClockFrameType:
frame := &protobufs.ClockFrame{}
if err := proto.Unmarshal(a.Value, frame); err != nil {
e.logger.Debug("could not unmarshal frame", zap.Error(err))
return p2p.ValidationResultReject
}
if ts := time.UnixMilli(frame.Timestamp); time.Since(ts) > time.Hour {
e.logger.Debug("frame is too old", zap.Time("timestamp", ts))
if ts := time.UnixMilli(frame.Timestamp); time.Since(ts) > 2*time.Minute {
return p2p.ValidationResultIgnore
}
return p2p.ValidationResultAccept
default:
e.logger.Debug("unknown message type", zap.String("type_url", a.TypeUrl))
return p2p.ValidationResultReject
}
}
@ -44,25 +39,47 @@ func (e *DataClockConsensusEngine) validateFrameMessage(peerID peer.ID, message
func (e *DataClockConsensusEngine) validateTxMessage(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
msg := &protobufs.Message{}
if err := proto.Unmarshal(message.Data, msg); err != nil {
e.logger.Debug("could not unmarshal message", zap.Error(err))
return p2p.ValidationResultReject
}
a := &anypb.Any{}
if err := proto.Unmarshal(msg.Payload, a); err != nil {
e.logger.Debug("could not unmarshal payload", zap.Error(err))
return p2p.ValidationResultReject
}
switch a.TypeUrl {
case protobufs.TokenRequestType:
tx := &protobufs.TokenRequest{}
if err := proto.Unmarshal(a.Value, tx); err != nil {
e.logger.Debug("could not unmarshal token request", zap.Error(err))
return p2p.ValidationResultReject
}
// NOTE: There are no timestamps to be validated for token requests.
if mint := tx.GetMint(); mint != nil {
if len(mint.Proofs) < 3 {
return p2p.ValidationResultReject
}
if len(mint.Proofs[1]) != 4 {
return p2p.ValidationResultReject
}
if len(mint.Proofs[2]) != 8 {
return p2p.ValidationResultReject
}
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if frameNumber := binary.BigEndian.Uint64(mint.Proofs[2]); frameNumber+2 < head.FrameNumber {
return p2p.ValidationResultIgnore
}
}
if tx.Timestamp == 0 {
// NOTE: The timestamp was added in later versions of the protocol,
// and as such it is possible to receive requests without it.
// We avoid logging for this reason.
return p2p.ValidationResultAccept
}
if ts := time.UnixMilli(tx.Timestamp); time.Since(ts) > 10*time.Minute {
return p2p.ValidationResultIgnore
}
return p2p.ValidationResultAccept
default:
e.logger.Debug("unknown message type", zap.String("type_url", a.TypeUrl))
return p2p.ValidationResultReject
}
}
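The mint checks above pin down the wire layout the validator now expects: at least three proof segments, with proofs[1] exactly 4 bytes and proofs[2] an 8-byte big-endian frame number that may trail the head by at most two frames; stale mints and stale timestamps are ignored rather than rejected, so peers are not penalized for slow propagation. A minimal decoding sketch using the same calls as the code above:

frameNumber := binary.BigEndian.Uint64(mint.Proofs[2]) // proofs[2] is exactly 8 bytes
stale := frameNumber+2 < head.FrameNumber              // ignored, not rejected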
@ -70,32 +87,26 @@ func (e *DataClockConsensusEngine) validateTxMessage(peerID peer.ID, message *pb
func (e *DataClockConsensusEngine) validateInfoMessage(peerID peer.ID, message *pb.Message) p2p.ValidationResult {
msg := &protobufs.Message{}
if err := proto.Unmarshal(message.Data, msg); err != nil {
e.logger.Debug("could not unmarshal message", zap.Error(err))
return p2p.ValidationResultReject
}
a := &anypb.Any{}
if err := proto.Unmarshal(msg.Payload, a); err != nil {
e.logger.Debug("could not unmarshal payload", zap.Error(err))
return p2p.ValidationResultReject
}
switch a.TypeUrl {
case protobufs.DataPeerListAnnounceType:
announce := &protobufs.DataPeerListAnnounce{}
if err := proto.Unmarshal(a.Value, announce); err != nil {
e.logger.Debug("could not unmarshal network info request", zap.Error(err))
return p2p.ValidationResultReject
}
if announce.Peer == nil {
e.logger.Debug("peer list announce is missing peer")
return p2p.ValidationResultIgnore
}
if ts := time.UnixMilli(announce.Peer.Timestamp); time.Since(ts) > 10*time.Minute {
e.logger.Debug("peer list announce is too old", zap.Time("timestamp", ts))
return p2p.ValidationResultIgnore
}
return p2p.ValidationResultAccept
default:
e.logger.Debug("unknown message type", zap.String("type_url", a.TypeUrl))
return p2p.ValidationResultReject
}
}

View File

@ -19,7 +19,7 @@ import (
"google.golang.org/grpc/status"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
grpc_internal "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
@ -31,7 +31,7 @@ func (e *DataClockConsensusEngine) GetDataFrame(
ctx context.Context,
request *protobufs.GetDataFrameRequest,
) (*protobufs.DataFrameResponse, error) {
peerID, ok := grpc_internal.PeerIDFromContext(ctx)
peerID, ok := qgrpc.PeerIDFromContext(ctx)
if !ok {
return nil, status.Error(codes.Internal, "remote peer ID not found")
}
@ -428,7 +428,7 @@ func (e *DataClockConsensusEngine) handleMint(
highestIncrement = previousIncrement
}
txn, err := e.coinStore.NewTransaction()
txn, err := e.coinStore.NewTransaction(false)
if err != nil {
return nil, errors.Wrap(err, "handle mint")
}
@ -543,6 +543,7 @@ func (e *DataClockConsensusEngine) handleMint(
},
},
},
Timestamp: time.Now().UnixMilli(),
},
)
if err != nil {
@ -610,7 +611,7 @@ func (e *DataClockConsensusEngine) GetPublicChannelForProvingKey(
)
after := time.After(20 * time.Second)
go func() {
server := grpc.NewServer(
server := qgrpc.NewServer(
grpc.MaxSendMsgSize(600*1024*1024),
grpc.MaxRecvMsgSize(600*1024*1024),
)
@ -652,7 +653,7 @@ func (e *DataClockConsensusEngine) GetPublicChannelForProvingKey(
}
client := protobufs.NewDataServiceClient(cc)
s, err := client.GetPublicChannel(
context.Background(),
e.ctx,
grpc.MaxCallSendMsgSize(600*1024*1024),
grpc.MaxCallRecvMsgSize(600*1024*1024),
)

View File

@ -2,7 +2,6 @@ package data
import (
"bytes"
"context"
"encoding/binary"
"strings"
"time"
@ -127,7 +126,7 @@ func (e *DataClockConsensusEngine) runPreMidnightProofWorker() {
if bytes.Equal(resume, make([]byte, 32)) {
status, err := client.GetPreMidnightMintStatus(
context.Background(),
e.ctx,
&protobufs.PreMidnightMintStatusRequest{
Owner: addr,
},
@ -210,7 +209,7 @@ func (e *DataClockConsensusEngine) runPreMidnightProofWorker() {
}
resp, err := client.HandlePreMidnightMint(
context.Background(),
e.ctx,
&protobufs.MintCoinRequest{
Proofs: proofs,
Signature: &protobufs.Ed448Signature{

View File

@ -44,7 +44,7 @@ func (pubsub) Publish(address []byte, data []byte) error
func (pubsub) PublishToBitmask(bitmask []byte, data []byte) error { return nil }
func (pubsub) Subscribe(bitmask []byte, handler func(message *pb.Message) error) error { return nil }
func (pubsub) Unsubscribe(bitmask []byte, raw bool) {}
func (pubsub) RegisterValidator(bitmask []byte, validator func(peerID peer.ID, message *pb.Message) p2p.ValidationResult) error {
func (pubsub) RegisterValidator(bitmask []byte, validator func(peerID peer.ID, message *pb.Message) p2p.ValidationResult, sync bool) error {
return nil
}
func (pubsub) UnregisterValidator(bitmask []byte) error { return nil }
@ -661,7 +661,7 @@ func TestHandlePreMidnightMint(t *testing.T) {
assert.Len(t, success.Requests, 1)
assert.Len(t, fail.Requests, 1)
txn, _ := app.CoinStore.NewTransaction()
txn, _ := app.CoinStore.NewTransaction(false)
for i, o := range app.TokenOutputs.Outputs {
switch e := o.Output.(type) {
case *protobufs.TokenOutput_Coin:
@ -670,7 +670,7 @@ func TestHandlePreMidnightMint(t *testing.T) {
err = app.CoinStore.PutCoin(txn, 1, a, e.Coin)
assert.NoError(t, err)
case *protobufs.TokenOutput_DeletedCoin:
c, err := app.CoinStore.GetCoinByAddress(txn, e.DeletedCoin.Address)
c, err := app.CoinStore.GetCoinByAddress(nil, e.DeletedCoin.Address)
assert.NoError(t, err)
err = app.CoinStore.DeleteCoin(txn, e.DeletedCoin.Address, c)
assert.NoError(t, err)

View File

@ -290,7 +290,7 @@ func (d *DataTimeReel) createGenesisFrame() (
panic(err)
}
txn, err := d.clockStore.NewTransaction()
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -311,7 +311,7 @@ func (d *DataTimeReel) createGenesisFrame() (
panic(err)
}
txn, err = d.clockStore.NewTransaction()
txn, err = d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -531,7 +531,7 @@ func (d *DataTimeReel) storePending(
zap.String("distance", distance.Text(16)),
)
txn, err := d.clockStore.NewTransaction()
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -638,7 +638,7 @@ func (d *DataTimeReel) setHead(frame *protobufs.ClockFrame, distance *big.Int) e
zap.Uint64("head_number", d.head.FrameNumber),
zap.String("head_output_tag", hex.EncodeToString(d.head.Output[:64])),
)
txn, err := d.clockStore.NewTransaction()
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -933,7 +933,7 @@ func (d *DataTimeReel) forkChoice(
rightReplaySelectors =
rightReplaySelectors[1:]
txn, err := d.clockStore.NewTransaction()
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -956,7 +956,7 @@ func (d *DataTimeReel) forkChoice(
frameNumber++
}
txn, err := d.clockStore.NewTransaction()
txn, err := d.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}

View File

@ -187,6 +187,7 @@ func TestDataTimeReel(t *testing.T) {
},
pubKeys,
true,
func() []*tries.RollingFrecencyCritbitTrie { return []*tries.RollingFrecencyCritbitTrie{} },
)
err = d.Start()

View File

@ -164,7 +164,7 @@ func (m *MasterTimeReel) createGenesisFrame() *protobufs.ClockFrame {
panic(err)
}
txn, err := m.clockStore.NewTransaction()
txn, err := m.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -212,7 +212,7 @@ func (m *MasterTimeReel) runLoop() {
continue
}
txn, err := m.clockStore.NewTransaction()
txn, err := m.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -281,7 +281,7 @@ func (m *MasterTimeReel) processPending() {
continue
}
txn, err := m.clockStore.NewTransaction()
txn, err := m.clockStore.NewTransaction(false)
if err != nil {
panic(err)
}

View File

@ -20,4 +20,5 @@ type ExecutionEngine interface {
GetSeniority() *big.Int
GetRingPosition() int
AnnounceProverJoin()
GetWorkerCount() uint32
}

View File

@ -1,8 +1,13 @@
package application
import (
"bytes"
"crypto"
"encoding/binary"
"runtime"
"sync"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
@ -148,35 +153,82 @@ func (a *TokenApplication) ApplyTransitions(
seen := map[string]struct{}{}
for _, transition := range requests {
set := make([]*protobufs.TokenRequest, len(requests))
fails := make([]*protobufs.TokenRequest, len(set))
outputsSet := make([][]*protobufs.TokenOutput, len(set))
for i, transition := range requests {
i := i
switch t := transition.Request.(type) {
case *protobufs.TokenRequest_Mint:
ring, parallelism, err := t.Mint.RingAndParallelism(
func(addr []byte) int {
if _, ok := seen[string(addr)]; ok {
return -1
}
if t == nil || t.Mint.Proofs == nil || t.Mint.Signature == nil {
continue
}
ring := -1
for i, t := range a.Tries[1:] {
if t.Contains(addr) {
ring = i
seen[string(addr)] = struct{}{}
}
}
payload := []byte("mint")
for _, p := range t.Mint.Proofs {
payload = append(payload, p...)
}
if err := t.Mint.Signature.Verify(payload); err != nil {
continue
}
return ring
},
addr, err := poseidon.HashBytes(
t.Mint.Signature.PublicKey.KeyValue,
)
if err != nil {
continue
}
parallelismMap[ring] = parallelismMap[ring] + uint64(parallelism)
if len(t.Mint.Proofs) == 1 && a.Tries[0].Contains(
addr.FillBytes(make([]byte, 32)),
) && bytes.Equal(t.Mint.Signature.PublicKey.KeyValue, a.Beacon) {
if _, ok := seen[string(t.Mint.Proofs[0][32:])]; !ok {
set[i] = transition
seen[string(t.Mint.Proofs[0][32:])] = struct{}{}
}
} else if len(t.Mint.Proofs) >= 3 && currentFrameNumber > PROOF_FRAME_CUTOFF {
frameNumber := binary.BigEndian.Uint64(t.Mint.Proofs[2])
if frameNumber < currentFrameNumber-2 {
fails[i] = transition
continue
}
ring, parallelism, err := t.Mint.RingAndParallelism(
func(addr []byte) int {
if _, ok := seen[string(addr)]; ok {
return -1
}
ring := -1
for i, t := range a.Tries[1:] {
if t.Contains(addr) {
ring = i
seen[string(addr)] = struct{}{}
}
}
return ring
},
)
if err == nil {
// fmt.Println(i, "checked ring test")
set[i] = transition
parallelismMap[ring] = parallelismMap[ring] + uint64(parallelism)
} else {
// fmt.Println(i, "failed ring test", err)
fails[i] = transition
}
}
default:
set[i] = transition
}
}
for _, transition := range requests {
successes := make([]*protobufs.TokenRequest, len(set))
for i, transition := range set {
if transition == nil {
continue
}
req:
switch t := transition.Request.(type) {
case *protobufs.TokenRequest_Announce:
@ -194,11 +246,8 @@ func (a *TokenApplication) ApplyTransitions(
)
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Join:
success, err := a.handleDataAnnounceProverJoin(
currentFrameNumber,
@ -212,17 +261,11 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Leave:
success, err := a.handleDataAnnounceProverLeave(
currentFrameNumber,
@ -236,17 +279,11 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Resume:
success, err := a.handleDataAnnounceProverResume(
currentFrameNumber,
@ -260,17 +297,11 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Pause:
success, err := a.handleDataAnnounceProverPause(
currentFrameNumber,
@ -284,17 +315,11 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Merge:
success, err := a.handleMerge(currentFrameNumber, lockMap, t.Merge)
if err != nil {
@ -304,17 +329,11 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Split:
success, err := a.handleSplit(currentFrameNumber, lockMap, t.Split)
if err != nil {
@ -324,17 +343,11 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
case *protobufs.TokenRequest_Transfer:
success, err := a.handleTransfer(currentFrameNumber, lockMap, t.Transfer)
if err != nil {
@ -344,48 +357,78 @@ func (a *TokenApplication) ApplyTransitions(
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
)
fails[i] = transition
break req
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
outputsSet[i] = success
successes[i] = transition
}
}
wg := sync.WaitGroup{}
throttle := make(chan struct{}, runtime.GOMAXPROCS(-1))
for i, transition := range set {
if transition == nil {
continue
}
i, transition := i, transition
switch t := transition.Request.(type) {
case *protobufs.TokenRequest_Mint:
success, err := a.handleMint(
currentFrameNumber,
lockMap,
t.Mint,
frame,
parallelismMap,
)
if err != nil {
if !skipFailures {
return nil, nil, nil, errors.Wrap(
err,
"apply transitions",
)
}
failedTransitions.Requests = append(
failedTransitions.Requests,
transition,
throttle <- struct{}{}
wg.Add(1)
go func() {
defer func() { <-throttle }()
defer wg.Done()
success, err := a.handleMint(
currentFrameNumber,
t.Mint,
frame,
parallelismMap,
)
break req
if err != nil {
fails[i] = transition
return
}
outputsSet[i] = success
successes[i] = transition
}()
}
}
wg.Wait()
finalFails := []*protobufs.TokenRequest{}
for _, fail := range fails {
if fail != nil {
finalFails = append(finalFails, fail)
}
}
if len(finalFails) != 0 && !skipFailures {
return nil, nil, nil, errors.Wrap(
err,
"apply transitions",
)
}
finalSuccesses := []*protobufs.TokenRequest{}
for _, success := range successes {
if success != nil {
finalSuccesses = append(finalSuccesses, success)
}
}
outputs.Outputs = []*protobufs.TokenOutput{}
for _, out := range outputsSet {
if out != nil {
for _, o := range out {
outputs.Outputs = append(outputs.Outputs, o)
}
outputs.Outputs = append(outputs.Outputs, success...)
finalizedTransitions.Requests = append(
finalizedTransitions.Requests,
transition,
)
}
}
a.TokenOutputs = outputs
finalizedTransitions.Requests = finalSuccesses
failedTransitions.Requests = finalFails
return a, finalizedTransitions, failedTransitions, nil
}
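The mint path above fans out through a counting semaphore: a buffered channel bounds the number of in-flight handleMint calls to GOMAXPROCS while a WaitGroup joins them, and results land in per-index slots so no locking is needed. A self-contained sketch of the same idiom:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	throttle := make(chan struct{}, runtime.GOMAXPROCS(-1)) // bound concurrency
	results := make([]int, 8)
	for i := range results {
		throttle <- struct{}{} // acquire a slot
		wg.Add(1)
		go func(i int) {
			defer func() { <-throttle }() // release the slot
			defer wg.Done()
			results[i] = i * i // stand-in for a.handleMint(...)
		}(i)
	}
	wg.Wait()
	fmt.Println(results)
}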

View File

@ -21,10 +21,10 @@ import (
const PROOF_FRAME_CUTOFF = 46500
const PROOF_FRAME_RING_RESET = 52000
const PROOF_FRAME_RING_RESET_2 = 53028
const PROOF_FRAME_SENIORITY_REPAIR = 59029
func (a *TokenApplication) handleMint(
currentFrameNumber uint64,
lockMap map[string]struct{},
t *protobufs.MintCoinRequest,
frame *protobufs.ClockFrame,
parallelismMap map[int]uint64,
@ -72,10 +72,6 @@ func (a *TokenApplication) handleMint(
return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
}
if _, touched := lockMap[string(t.Proofs[0][32:])]; touched {
return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
}
_, pr, err := a.CoinStore.GetPreCoinProofsForOwner(t.Proofs[0][32:])
if err != nil && !errors.Is(err, store.ErrNotFound) {
return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
@ -87,8 +83,6 @@ func (a *TokenApplication) handleMint(
}
}
lockMap[string(t.Proofs[0][32:])] = struct{}{}
outputs := []*protobufs.TokenOutput{
&protobufs.TokenOutput{
Output: &protobufs.TokenOutput_Proof{
@ -130,28 +124,12 @@ func (a *TokenApplication) handleMint(
zap.String("peer_id", base58.Encode([]byte(peerId))),
zap.Uint64("frame_number", currentFrameNumber),
)
if _, touched := lockMap[string(t.Signature.PublicKey.KeyValue)]; touched {
a.Logger.Debug(
"already received",
zap.String("peer_id", base58.Encode([]byte(peerId))),
zap.Uint64("frame_number", currentFrameNumber),
)
return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
}
ring := -1
for i, t := range a.Tries[1:] {
if t.Contains(altAddr.FillBytes(make([]byte, 32))) {
ring = i
}
}
if ring == -1 {
a.Logger.Debug(
"not in ring",
zap.String("peer_id", base58.Encode([]byte(peerId))),
zap.Uint64("frame_number", currentFrameNumber),
)
return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
}
_, prfs, err := a.CoinStore.GetPreCoinProofsForOwner(
altAddr.FillBytes(make([]byte, 32)),
@ -181,7 +159,6 @@ func (a *TokenApplication) handleMint(
zap.String("peer_id", base58.Encode([]byte(peerId))),
zap.Uint64("frame_number", currentFrameNumber),
)
lockMap[string(t.Signature.PublicKey.KeyValue)] = struct{}{}
return []*protobufs.TokenOutput{&protobufs.TokenOutput{
Output: &protobufs.TokenOutput_Penalty{
Penalty: &protobufs.ProverPenalty{
@ -210,7 +187,6 @@ func (a *TokenApplication) handleMint(
zap.String("peer_id", base58.Encode([]byte(peerId))),
zap.Uint64("frame_number", currentFrameNumber),
)
lockMap[string(t.Signature.PublicKey.KeyValue)] = struct{}{}
return []*protobufs.TokenOutput{&protobufs.TokenOutput{
Output: &protobufs.TokenOutput_Penalty{
Penalty: &protobufs.ProverPenalty{
@ -271,7 +247,6 @@ func (a *TokenApplication) handleMint(
zap.String("peer_id", base58.Encode([]byte(peerId))),
zap.Uint64("frame_number", currentFrameNumber),
)
lockMap[string(t.Signature.PublicKey.KeyValue)] = struct{}{}
return []*protobufs.TokenOutput{&protobufs.TokenOutput{
Output: &protobufs.TokenOutput_Penalty{
Penalty: &protobufs.ProverPenalty{
@ -310,7 +285,6 @@ func (a *TokenApplication) handleMint(
zap.Uint64("frame_number", currentFrameNumber),
zap.Int("proof_size", len(leaf)),
)
lockMap[string(t.Signature.PublicKey.KeyValue)] = struct{}{}
return []*protobufs.TokenOutput{&protobufs.TokenOutput{
Output: &protobufs.TokenOutput_Penalty{
Penalty: &protobufs.ProverPenalty{
@ -359,7 +333,11 @@ func (a *TokenApplication) handleMint(
}
if verified && delete != nil && len(t.Proofs) > 3 && wesoVerified {
storage := PomwBasis(1, ring, currentFrameNumber)
storage.Quo(storage, big.NewInt(int64(parallelismMap[ring])))
m := parallelismMap[ring]
if m == 0 {
m = 1
}
storage.Quo(storage, big.NewInt(int64(m)))
storage.Mul(storage, big.NewInt(int64(parallelism)))
a.Logger.Debug(
@ -452,7 +430,6 @@ func (a *TokenApplication) handleMint(
})
}
}
lockMap[string(t.Signature.PublicKey.KeyValue)] = struct{}{}
return outputs, nil
}
a.Logger.Debug(

View File

@ -83,7 +83,7 @@ func TestHandleProverJoin(t *testing.T) {
[][]byte{bpub},
)
selbi, _ := gen.GetSelector()
txn, _ := app.ClockStore.NewTransaction()
txn, _ := app.ClockStore.NewTransaction(false)
app.ClockStore.StageDataClockFrame(selbi.FillBytes(make([]byte, 32)), gen, txn)
app.ClockStore.CommitDataClockFrame(gen.Filter, 0, selbi.FillBytes(make([]byte, 32)), app.Tries, txn, false)
txn.Commit()
@ -115,7 +115,7 @@ func TestHandleProverJoin(t *testing.T) {
assert.Len(t, fail.Requests, 0)
app.Tries = append(app.Tries, &tries.RollingFrecencyCritbitTrie{})
app.Tries[1].Add(addr, 0)
txn, _ = app.ClockStore.NewTransaction()
txn, _ = app.ClockStore.NewTransaction(false)
frame1, _ := wprover.ProveDataClockFrame(gen, [][]byte{}, []*protobufs.InclusionAggregateProof{}, bprivKey, time.Now().UnixMilli(), 10000)
selbi, _ = frame1.GetSelector()
app.ClockStore.StageDataClockFrame(selbi.FillBytes(make([]byte, 32)), frame1, txn)
@ -144,7 +144,7 @@ func TestHandleProverJoin(t *testing.T) {
false,
)
assert.Error(t, err)
txn, _ = app.ClockStore.NewTransaction()
txn, _ = app.ClockStore.NewTransaction(false)
frame2, _ := wprover.ProveDataClockFrame(frame1, [][]byte{}, []*protobufs.InclusionAggregateProof{}, bprivKey, time.Now().UnixMilli(), 10000)
selbi, _ = frame2.GetSelector()
app.ClockStore.StageDataClockFrame(selbi.FillBytes(make([]byte, 32)), frame2, txn)
@ -205,7 +205,7 @@ func TestHandleProverJoin(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, success.Requests, 1)
assert.Len(t, app.TokenOutputs.Outputs, 1)
txn, _ = app.CoinStore.NewTransaction()
txn, _ = app.CoinStore.NewTransaction(false)
for i, o := range app.TokenOutputs.Outputs {
switch e := o.Output.(type) {
case *protobufs.TokenOutput_Coin:
@ -233,7 +233,7 @@ func TestHandleProverJoin(t *testing.T) {
}
}
err = txn.Commit()
txn, _ = app.ClockStore.NewTransaction()
txn, _ = app.ClockStore.NewTransaction(false)
frame3, _ := wprover.ProveDataClockFrame(frame2, [][]byte{}, []*protobufs.InclusionAggregateProof{}, bprivKey, time.Now().UnixMilli(), 10000)
selbi, _ = frame3.GetSelector()
app.ClockStore.StageDataClockFrame(selbi.FillBytes(make([]byte, 32)), frame3, txn)

View File

@ -185,7 +185,7 @@ func NewTokenExecutionEngine(
panic(err)
}
txn, err := clockStore.NewTransaction()
txn, err := clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -249,7 +249,7 @@ func NewTokenExecutionEngine(
panic(err)
}
txn, err := clockStore.NewTransaction()
txn, err := clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -390,6 +390,7 @@ func NewTokenExecutionEngine(
},
},
},
Timestamp: gotime.Now().UnixMilli(),
},
)
}
@ -536,6 +537,8 @@ func (e *TokenExecutionEngine) ProcessFrame(
proverTrieJoinRequests := [][]byte{}
proverTrieLeaveRequests := [][]byte{}
mapSnapshot := ToSerializedMap(e.peerSeniority)
activeMap := NewFromMap(mapSnapshot)
for i, output := range app.TokenOutputs.Outputs {
switch o := output.Output.(type) {
@ -556,7 +559,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
return nil, errors.Wrap(err, "process frame")
}
case *protobufs.TokenOutput_DeletedCoin:
coin, err := e.coinStore.GetCoinByAddress(txn, o.DeletedCoin.Address)
coin, err := e.coinStore.GetCoinByAddress(nil, o.DeletedCoin.Address)
if err != nil {
txn.Abort()
return nil, errors.Wrap(err, "process frame")
@ -596,14 +599,14 @@ func (e *TokenExecutionEngine) ProcessFrame(
break
}
}
if _, ok := (*e.peerSeniority)[addr]; !ok {
(*e.peerSeniority)[addr] = PeerSeniorityItem{
if _, ok := (*activeMap)[addr]; !ok {
(*activeMap)[addr] = PeerSeniorityItem{
seniority: 10,
addr: addr,
}
} else {
(*e.peerSeniority)[addr] = PeerSeniorityItem{
seniority: (*e.peerSeniority)[addr].seniority + 10,
(*activeMap)[addr] = PeerSeniorityItem{
seniority: (*activeMap)[addr].seniority + 10,
addr: addr,
}
}
@ -649,7 +652,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
return nil, errors.Wrap(err, "process frame")
}
sen, ok := (*e.peerSeniority)[string(addr)]
sen, ok := (*activeMap)[string(addr)]
if !ok {
logger(
"peer announced with no seniority",
@ -706,7 +709,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
logger("combined aggregate and 1.4.19-21 seniority", zap.Uint64("seniority", total))
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
(*activeMap)[string(addr)] = PeerSeniorityItem{
seniority: aggregated + additional,
addr: string(addr),
}
@ -720,7 +723,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
return nil, errors.Wrap(err, "process frame")
}
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
(*activeMap)[string(addr)] = PeerSeniorityItem{
seniority: 0,
addr: string(addr),
}
@ -734,7 +737,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
return nil, errors.Wrap(err, "process frame")
}
sen, ok := (*e.peerSeniority)[string(addr)]
sen, ok := (*activeMap)[string(addr)]
if !ok {
logger(
"peer announced with no seniority",
@ -777,7 +780,7 @@ func (e *TokenExecutionEngine) ProcessFrame(
}
total := GetAggregatedSeniority([]string{peerIds[0]}).Uint64() + additional
logger("combined aggregate and 1.4.19-21 seniority", zap.Uint64("seniority", total))
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
(*activeMap)[string(addr)] = PeerSeniorityItem{
seniority: total,
addr: string(addr),
}
@ -789,14 +792,14 @@ func (e *TokenExecutionEngine) ProcessFrame(
return nil, errors.Wrap(err, "process frame")
}
if _, ok := (*e.peerSeniority)[string(addr)]; !ok {
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
if _, ok := (*activeMap)[string(addr)]; !ok {
(*activeMap)[string(addr)] = PeerSeniorityItem{
seniority: 20,
addr: string(addr),
}
} else {
(*e.peerSeniority)[string(addr)] = PeerSeniorityItem{
seniority: (*e.peerSeniority)[string(addr)].seniority + 20,
(*activeMap)[string(addr)] = PeerSeniorityItem{
seniority: (*activeMap)[string(addr)].seniority + 20,
addr: string(addr),
}
}
@ -822,14 +825,14 @@ func (e *TokenExecutionEngine) ProcessFrame(
}
case *protobufs.TokenOutput_Penalty:
addr := string(o.Penalty.Account.GetImplicitAccount().Address)
if _, ok := (*e.peerSeniority)[addr]; !ok {
(*e.peerSeniority)[addr] = PeerSeniorityItem{
if _, ok := (*activeMap)[addr]; !ok {
(*activeMap)[addr] = PeerSeniorityItem{
seniority: 0,
addr: addr,
}
proverTrieLeaveRequests = append(proverTrieLeaveRequests, []byte(addr))
} else {
if (*e.peerSeniority)[addr].seniority > o.Penalty.Quantity {
if (*activeMap)[addr].seniority > o.Penalty.Quantity {
for _, t := range app.Tries {
if t.Contains([]byte(addr)) {
v := t.Get([]byte(addr))
@ -840,12 +843,12 @@ func (e *TokenExecutionEngine) ProcessFrame(
break
}
}
(*e.peerSeniority)[addr] = PeerSeniorityItem{
seniority: (*e.peerSeniority)[addr].seniority - o.Penalty.Quantity,
(*activeMap)[addr] = PeerSeniorityItem{
seniority: (*activeMap)[addr].seniority - o.Penalty.Quantity,
addr: addr,
}
} else {
(*e.peerSeniority)[addr] = PeerSeniorityItem{
(*activeMap)[addr] = PeerSeniorityItem{
seniority: 0,
addr: addr,
}
@ -858,23 +861,23 @@ func (e *TokenExecutionEngine) ProcessFrame(
joinAddrs := tries.NewMinHeap[PeerSeniorityItem]()
leaveAddrs := tries.NewMinHeap[PeerSeniorityItem]()
for _, addr := range proverTrieJoinRequests {
if _, ok := (*e.peerSeniority)[string(addr)]; !ok {
if _, ok := (*activeMap)[string(addr)]; !ok {
joinAddrs.Push(PeerSeniorityItem{
addr: string(addr),
seniority: 0,
})
} else {
joinAddrs.Push((*e.peerSeniority)[string(addr)])
joinAddrs.Push((*activeMap)[string(addr)])
}
}
for _, addr := range proverTrieLeaveRequests {
if _, ok := (*e.peerSeniority)[string(addr)]; !ok {
if _, ok := (*activeMap)[string(addr)]; !ok {
leaveAddrs.Push(PeerSeniorityItem{
addr: string(addr),
seniority: 0,
})
} else {
leaveAddrs.Push((*e.peerSeniority)[string(addr)])
leaveAddrs.Push((*activeMap)[string(addr)])
}
}
@ -887,10 +890,14 @@ func (e *TokenExecutionEngine) ProcessFrame(
ProcessJoinsAndLeaves(joinReqs, leaveReqs, app, e.peerSeniority, frame)
if frame.FrameNumber == application.PROOF_FRAME_SENIORITY_REPAIR {
e.performSeniorityMapRepair(activeMap, frame)
}
err = e.clockStore.PutPeerSeniorityMap(
txn,
e.intrinsicFilter,
ToSerializedMap(e.peerSeniority),
ToSerializedMap(activeMap),
)
if err != nil {
txn.Abort()
@ -903,6 +910,8 @@ func (e *TokenExecutionEngine) ProcessFrame(
return nil, errors.Wrap(err, "process frame")
}
e.peerSeniority = activeMap
if frame.FrameNumber == application.PROOF_FRAME_RING_RESET ||
frame.FrameNumber == application.PROOF_FRAME_RING_RESET_2 {
e.logger.Info("performing ring reset")
@ -930,6 +939,95 @@ func (e *TokenExecutionEngine) ProcessFrame(
return app.Tries, nil
}
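ProcessFrame now follows a snapshot-and-swap discipline for the seniority map: it serializes the live map, mutates a working copy (activeMap), persists the copy via PutPeerSeniorityMap inside the transaction, and only assigns activeMap back to e.peerSeniority once the commit succeeds, so an aborted frame leaves the live map untouched. A minimal sketch of the pattern, with persist standing in for the real store call and commit:

package main

import "fmt"

// processFrameSnapshot sketches the copy-mutate-swap pattern: mutations
// target a copy of the live map, and the copy replaces the live map only
// after persistence succeeds.
func processFrameSnapshot(
	live map[string]uint64,
	persist func(map[string]uint64) error,
) (map[string]uint64, error) {
	active := make(map[string]uint64, len(live))
	for k, v := range live {
		active[k] = v
	}
	active["peer-a"] += 10 // mutate the copy, never the live map
	if err := persist(active); err != nil {
		return live, err // on failure the caller keeps the untouched live map
	}
	return active, nil // on success the copy becomes the live map
}

func main() {
	live := map[string]uint64{"peer-a": 5}
	live, _ = processFrameSnapshot(live, func(map[string]uint64) error { return nil })
	fmt.Println(live["peer-a"]) // prints 15
}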
func (e *TokenExecutionEngine) performSeniorityMapRepair(
activeMap *PeerSeniority,
frame *protobufs.ClockFrame,
) {
if e.pubSub.GetNetwork() != 0 {
return
}
e.logger.Info(
"repairing seniority map from historic data, this may take a while",
)
RebuildPeerSeniority(0)
for f := uint64(application.PROOF_FRAME_RING_RESET_2); f < frame.FrameNumber; f++ {
frame, _, err := e.clockStore.GetDataClockFrame(e.intrinsicFilter, f, false)
if err != nil {
break
}
reqs, _, _ := application.GetOutputsFromClockFrame(frame)
for _, req := range reqs.Requests {
switch t := req.Request.(type) {
case *protobufs.TokenRequest_Join:
if t.Join.Announce != nil && len(
t.Join.Announce.PublicKeySignaturesEd448,
) > 0 {
addr, err := e.getAddressFromSignature(
t.Join.Announce.PublicKeySignaturesEd448[0],
)
if err != nil {
continue
}
peerId, err := e.getPeerIdFromSignature(
t.Join.Announce.PublicKeySignaturesEd448[0],
)
if err != nil {
continue
}
additional := uint64(0)
_, prfs, err := e.coinStore.GetPreCoinProofsForOwner(addr)
// Check the lookup error before iterating prfs; skip this join on any
// unexpected store error.
if err != nil && !errors.Is(err, store.ErrNotFound) {
continue
}
for _, pr := range prfs {
if pr.IndexProof == nil && pr.Difficulty == 0 && pr.Commitment == nil {
// approximate average per interval:
add := new(big.Int).SetBytes(pr.Amount)
add.Quo(add, big.NewInt(58800000))
if add.Cmp(big.NewInt(4000000)) > 0 {
add = big.NewInt(4000000)
}
additional = add.Uint64()
}
}
peerIds := []string{peerId.String()}
if len(t.Join.Announce.PublicKeySignaturesEd448) > 1 {
for _, announce := range t.Join.Announce.PublicKeySignaturesEd448[1:] {
peerId, err := e.getPeerIdFromSignature(
announce,
)
if err != nil {
continue
}
peerIds = append(peerIds, peerId.String())
}
}
aggregated := GetAggregatedSeniority(peerIds).Uint64()
total := aggregated + additional
sen, ok := (*activeMap)[string(addr)]
if !ok || sen.seniority < total {
(*activeMap)[string(addr)] = PeerSeniorityItem{
seniority: total,
addr: string(addr),
}
}
}
}
}
}
}
func ProcessJoinsAndLeaves(
joinReqs []PeerSeniorityItem,
leaveReqs []PeerSeniorityItem,
@ -1046,9 +1144,7 @@ func (e *TokenExecutionEngine) VerifyExecution(
}
parent, tries, err := e.clockStore.GetDataClockFrame(
append(
p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3),
),
p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3),
frame.FrameNumber-1,
false,
)
@ -1370,6 +1466,7 @@ func (e *TokenExecutionEngine) AnnounceProverJoin() {
Announce: e.AnnounceProverMerge(),
},
},
Timestamp: gotime.Now().UnixMilli(),
},
)
}
@ -1442,3 +1539,7 @@ func (e *TokenExecutionEngine) getAddressFromSignature(
return altAddr.FillBytes(make([]byte, 32)), nil
}
func (e *TokenExecutionEngine) GetWorkerCount() uint32 {
return e.clock.GetWorkerCount()
}

View File

@ -848,7 +848,7 @@ func CreateGenesisState(
totalExecutions,
),
)
txn, err := coinStore.NewTransaction()
txn, err := coinStore.NewTransaction(false)
for _, output := range genesisState.Outputs {
if err != nil {
panic(err)
@ -872,7 +872,7 @@ func CreateGenesisState(
panic(err)
}
txn, err = clockStore.NewTransaction()
txn, err = clockStore.NewTransaction(false)
if err != nil {
panic(err)
}
@ -989,7 +989,7 @@ func CreateGenesisState(
})
logger.Info("serializing execution state to store")
txn, err := coinStore.NewTransaction()
txn, err := coinStore.NewTransaction(false)
for _, output := range genesisState.Outputs {
if err != nil {
panic(err)

View File

@ -26,6 +26,7 @@ replace github.com/cockroachdb/pebble => ../pebble
require (
github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a
github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1
github.com/libp2p/go-libp2p v0.35.4
github.com/libp2p/go-libp2p-kad-dht v0.23.0
github.com/shopspring/decimal v1.4.0
@ -41,6 +42,7 @@ require (
require (
filippo.io/edwards25519 v1.0.0-rc.1 // indirect
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326 // indirect
github.com/pion/datachannel v1.5.6 // indirect
@ -60,7 +62,6 @@ require (
github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/webrtc/v3 v3.2.40 // indirect
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f // indirect
github.com/tatsushid/go-critbit v0.0.0-20180327152158-487ef94b52c1 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/trace v1.16.0 // indirect
@ -98,8 +99,8 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
golang.org/x/term v0.21.0
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
@ -121,10 +122,9 @@ require (
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/google/wire v0.5.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
@ -203,6 +203,6 @@ require (
golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
gonum.org/v1/gonum v0.13.0 // indirect
google.golang.org/grpc v1.58.2
google.golang.org/grpc v1.63.2
lukechampine.com/blake3 v1.2.1 // indirect
)

View File

@ -139,8 +139,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -157,9 +157,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -170,7 +169,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -186,8 +184,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=
github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
@ -198,6 +196,10 @@ github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORR
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk=
@ -522,8 +524,6 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tatsushid/go-critbit v0.0.0-20180327152158-487ef94b52c1 h1:hCGjUxpUUmnZg0yt5aJPdRkDndH/1e8ptiV73urNUBQ=
github.com/tatsushid/go-critbit v0.0.0-20180327152158-487ef94b52c1/go.mod h1:iM17aYTnMeqDSIETK30CkHnWIAeogWYHFBz9ceCGaks=
github.com/txaty/go-merkletree v0.2.2 h1:K5bHDFK+Q3KK+gEJeyTOECKuIwl/LVo4CI+cm0/p34g=
github.com/txaty/go-merkletree v0.2.2/go.mod h1:w5HPEu7ubNw5LzS+91m+1/GtuZcWHKiPU3vEGi+ThJM=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@ -768,12 +768,12 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@ -782,8 +782,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -793,8 +793,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -0,0 +1,17 @@
package grpc
import (
"context"
"google.golang.org/grpc"
)
// NewServer returns a new grpc.Server with the given options.
func NewServer(opts ...grpc.ServerOption) *grpc.Server {
return grpc.NewServer(ServerOptions(opts...)...)
}
// DialContext returns a new grpc.ClientConn with the given target and options.
func DialContext(ctx context.Context, target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
return grpc.DialContext(ctx, target, ClientOptions(opts...)...)
}
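These wrappers are drop-in replacements for grpc.NewServer and grpc.DialContext that attach the Prometheus interceptors defined below. A minimal usage sketch; the message-size option, target address, and credentials are illustrative, not taken from this change:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
)

func main() {
	// Server side: same options as grpc.NewServer, plus metrics interceptors.
	srv := qgrpc.NewServer(grpc.MaxRecvMsgSize(10 * 1024 * 1024))
	defer srv.Stop()

	// Client side: same options as grpc.DialContext, plus metrics interceptors.
	conn, err := qgrpc.DialContext(
		context.Background(),
		"localhost:8337",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}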

View File

@ -0,0 +1,27 @@
package grpc
import "google.golang.org/grpc"
// ServerOptions returns a list of grpc.ServerOptions which are commonly used.
func ServerOptions(opts ...grpc.ServerOption) []grpc.ServerOption {
return append(opts,
grpc.ChainUnaryInterceptor(
serverMetrics.UnaryServerInterceptor(),
),
grpc.ChainStreamInterceptor(
serverMetrics.StreamServerInterceptor(),
),
)
}
// ClientOptions returns a list of grpc.DialOptions which are commonly used.
func ClientOptions(opts ...grpc.DialOption) []grpc.DialOption {
return append(opts,
grpc.WithChainStreamInterceptor(
clientMetrics.StreamClientInterceptor(),
),
grpc.WithChainUnaryInterceptor(
clientMetrics.UnaryClientInterceptor(),
),
)
}

View File

@ -0,0 +1,16 @@
package grpc
import (
prom_middleware "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
"github.com/prometheus/client_golang/prometheus"
)
var (
serverMetrics = prom_middleware.NewServerMetrics()
clientMetrics = prom_middleware.NewClientMetrics()
)
func init() {
prometheus.MustRegister(serverMetrics)
prometheus.MustRegister(clientMetrics)
}

View File

@ -0,0 +1,285 @@
package observability
import (
"encoding/base64"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/prometheus/client_golang/prometheus"
blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)
const blossomSubNamespace = "blossomsub"
var binaryEncoding = base64.RawStdEncoding
type blossomSubRawTracer struct {
addPeerTotal *prometheus.CounterVec
removePeerTotal prometheus.Counter
joinTotal *prometheus.CounterVec
leaveTotal *prometheus.CounterVec
graftTotal *prometheus.CounterVec
pruneTotal *prometheus.CounterVec
validateMessageTotal *prometheus.CounterVec
deliverMessageTotal *prometheus.CounterVec
rejectMessageTotal *prometheus.CounterVec
duplicateMessageTotal *prometheus.CounterVec
throttlePeerTotal prometheus.Counter
recvRPCTotal prometheus.Counter
sendRPCTotal prometheus.Counter
dropRPCTotal prometheus.Counter
undeliverableMessageTotal *prometheus.CounterVec
}
var _ blossomsub.RawTracer = (*blossomSubRawTracer)(nil)
// AddPeer implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) AddPeer(p peer.ID, proto protocol.ID) {
b.addPeerTotal.WithLabelValues(string(proto)).Inc()
}
// RemovePeer implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) RemovePeer(p peer.ID) {
b.removePeerTotal.Inc()
}
// Join implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) Join(bitmask []byte) {
b.joinTotal.WithLabelValues(binaryEncoding.EncodeToString(bitmask)).Inc()
}
// Leave implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) Leave(bitmask []byte) {
b.leaveTotal.WithLabelValues(binaryEncoding.EncodeToString(bitmask)).Inc()
}
// Graft implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) Graft(p peer.ID, bitmask []byte) {
b.graftTotal.WithLabelValues(binaryEncoding.EncodeToString(bitmask)).Inc()
}
// Prune implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) Prune(p peer.ID, bitmask []byte) {
b.pruneTotal.WithLabelValues(binaryEncoding.EncodeToString(bitmask)).Inc()
}
// ValidateMessage implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) ValidateMessage(msg *blossomsub.Message) {
b.validateMessageTotal.WithLabelValues(binaryEncoding.EncodeToString(msg.GetBitmask())).Inc()
}
// DeliverMessage implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) DeliverMessage(msg *blossomsub.Message) {
b.deliverMessageTotal.WithLabelValues(binaryEncoding.EncodeToString(msg.GetBitmask())).Inc()
}
// RejectMessage implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) RejectMessage(msg *blossomsub.Message, reason string) {
b.rejectMessageTotal.WithLabelValues(binaryEncoding.EncodeToString(msg.GetBitmask()), reason).Inc()
}
// DuplicateMessage implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) DuplicateMessage(msg *blossomsub.Message) {
b.duplicateMessageTotal.WithLabelValues(binaryEncoding.EncodeToString(msg.GetBitmask())).Inc()
}
// ThrottlePeer implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) ThrottlePeer(p peer.ID) {
b.throttlePeerTotal.Inc()
}
// RecvRPC implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) RecvRPC(rpc *blossomsub.RPC) {
b.recvRPCTotal.Inc()
}
// SendRPC implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) SendRPC(rpc *blossomsub.RPC, p peer.ID) {
b.sendRPCTotal.Inc()
}
// DropRPC implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) DropRPC(rpc *blossomsub.RPC, p peer.ID) {
b.dropRPCTotal.Inc()
}
// UndeliverableMessage implements blossomsub.RawTracer.
func (b *blossomSubRawTracer) UndeliverableMessage(msg *blossomsub.Message) {
b.undeliverableMessageTotal.WithLabelValues(binaryEncoding.EncodeToString(msg.GetBitmask())).Inc()
}
var _ prometheus.Collector = (*blossomSubRawTracer)(nil)
// Describe implements prometheus.Collector.
func (b *blossomSubRawTracer) Describe(ch chan<- *prometheus.Desc) {
b.addPeerTotal.Describe(ch)
b.removePeerTotal.Describe(ch)
b.joinTotal.Describe(ch)
b.leaveTotal.Describe(ch)
b.graftTotal.Describe(ch)
b.pruneTotal.Describe(ch)
b.validateMessageTotal.Describe(ch)
b.deliverMessageTotal.Describe(ch)
b.rejectMessageTotal.Describe(ch)
b.duplicateMessageTotal.Describe(ch)
b.throttlePeerTotal.Describe(ch)
b.recvRPCTotal.Describe(ch)
b.sendRPCTotal.Describe(ch)
b.dropRPCTotal.Describe(ch)
b.undeliverableMessageTotal.Describe(ch)
}
// Collect implements prometheus.Collector.
func (b *blossomSubRawTracer) Collect(ch chan<- prometheus.Metric) {
b.addPeerTotal.Collect(ch)
b.removePeerTotal.Collect(ch)
b.joinTotal.Collect(ch)
b.leaveTotal.Collect(ch)
b.graftTotal.Collect(ch)
b.pruneTotal.Collect(ch)
b.validateMessageTotal.Collect(ch)
b.deliverMessageTotal.Collect(ch)
b.rejectMessageTotal.Collect(ch)
b.duplicateMessageTotal.Collect(ch)
b.throttlePeerTotal.Collect(ch)
b.recvRPCTotal.Collect(ch)
b.sendRPCTotal.Collect(ch)
b.dropRPCTotal.Collect(ch)
b.undeliverableMessageTotal.Collect(ch)
}
type BlossomSubRawTracer interface {
blossomsub.RawTracer
prometheus.Collector
}
func NewBlossomSubRawTracer() BlossomSubRawTracer {
b := &blossomSubRawTracer{
addPeerTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "add_peer_total",
Help: "Total number of peers added to the mesh.",
},
[]string{"protocol"},
),
removePeerTotal: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "remove_peer_total",
Help: "Total number of peers removed from the mesh.",
},
),
joinTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "join_total",
Help: "Total number of joins to the mesh.",
},
[]string{"bitmask"},
),
leaveTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "leave_total",
Help: "Total number of leaves from the mesh.",
},
[]string{"bitmask"},
),
graftTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "graft_total",
Help: "Total number of grafts.",
},
[]string{"bitmask"},
),
pruneTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "prune_total",
Help: "Total number of prunes.",
},
[]string{"bitmask"},
),
validateMessageTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "validate_message_total",
Help: "Total number of messages validated.",
},
[]string{"bitmask"},
),
deliverMessageTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "deliver_message_total",
Help: "Total number of messages delivered.",
},
[]string{"bitmask"},
),
rejectMessageTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "reject_message_total",
Help: "Total number of messages rejected.",
},
[]string{"bitmask", "reason"},
),
duplicateMessageTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "duplicate_message_total",
Help: "Total number of messages duplicated.",
},
[]string{"bitmask"},
),
throttlePeerTotal: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "throttle_peer_total",
Help: "Total number of peers throttled.",
},
),
recvRPCTotal: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "recv_rpc_total",
Help: "Total number of RPCs received.",
},
),
sendRPCTotal: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "send_rpc_total",
Help: "Total number of RPCs sent.",
},
),
dropRPCTotal: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "drop_rpc_total",
Help: "Total number of RPCs dropped.",
},
),
undeliverableMessageTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: blossomSubNamespace,
Name: "undeliverable_message_total",
Help: "Total number of messages undeliverable.",
},
[]string{"bitmask"},
),
}
return b
}
var globalBlossomSubRawTracer = NewBlossomSubRawTracer()
func init() {
prometheus.MustRegister(globalBlossomSubRawTracer)
}
func WithPrometheusRawTracer() blossomsub.Option {
return blossomsub.WithRawTracer(globalBlossomSubRawTracer)
}
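Because init registers the tracer against the default Prometheus registry, any process serving promhttp.Handler() (as main.go does below when the prometheus-server flag is set) exposes these counters automatically, e.g. blossomsub_recv_rpc_total and blossomsub_deliver_message_total.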

View File

@ -45,6 +45,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pbnjay/memory"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"source.quilibrium.com/quilibrium/monorepo/node/app"
"source.quilibrium.com/quilibrium/monorepo/node/config"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
@ -93,6 +94,11 @@ var (
"",
"enable pprof server on specified address (e.g. localhost:6060)",
)
prometheusServer = flag.String(
"prometheus-server",
"",
"enable prometheus server on specified address (e.g. localhost:8080)",
)
nodeInfo = flag.Bool(
"node-info",
false,
@ -263,6 +269,14 @@ func main() {
}()
}
if *prometheusServer != "" && *core == 0 {
go func() {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(*prometheusServer, mux))
}()
}
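Usage note: starting the node with -prometheus-server localhost:8080 (the exact binary invocation is illustrative) serves the default registry, including the gRPC and BlossomSub collectors registered above, at http://localhost:8080/metrics. The *core == 0 guard presumably restricts the listener to the parent process so worker subprocesses do not contend for the address.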
if *balance {
config, err := config.LoadConfig(*configDirectory, "", false)
if err != nil {
@ -584,14 +598,14 @@ func RunForkRepairIfNeeded(
if bytes.Equal(badFrameSelector, compareSel.FillBytes(make([]byte, 32))) {
logger.Info("performing fork repair")
txn, _ := coinStore.NewTransaction()
txn, _ := coinStore.NewTransaction(false)
_, outs, _ := application.GetOutputsFromClockFrame(frame)
logger.Info("removing invalid frame at position 48995")
for i, output := range outs.Outputs {
switch o := output.Output.(type) {
case *protobufs.TokenOutput_Coin:
address, _ := token.GetAddressOfCoin(o.Coin, frame.FrameNumber, uint64(i))
coin, err := coinStore.GetCoinByAddress(txn, address)
coin, err := coinStore.GetCoinByAddress(nil, address)
if err != nil {
fmt.Println(err)
return
@ -642,7 +656,7 @@ func RunForkRepairIfNeeded(
return
}
txn, _ := clockStore.NewTransaction()
txn, _ := clockStore.NewTransaction(false)
if err := overrideHead(
txn,
clockStore,
@ -1261,5 +1275,6 @@ func printNodeInfo(cfg *config.Config) {
fmt.Println("Seniority: " + new(big.Int).SetBytes(
nodeInfo.PeerSeniority,
).String())
fmt.Println("Active Workers:", nodeInfo.Workers)
printBalance(cfg)
}

View File

@ -12,6 +12,7 @@ import (
"math/bits"
"net"
"net/http"
"runtime"
"strconv"
"strings"
"sync"
@ -32,7 +33,6 @@ import (
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
"github.com/libp2p/go-libp2p/p2p/net/gostream"
"github.com/libp2p/go-libp2p/p2p/net/swarm"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/mr-tron/base58"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
@ -45,16 +45,25 @@ import (
blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/config"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/internal/observability"
"source.quilibrium.com/quilibrium/monorepo/node/p2p/internal"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// The default watermarks are the defaults used by libp2p.DefaultConnectionManager.
// We explicitly set them here in order to force internal consistency between the
// connection manager and the resource manager.
const (
minPeersPerBitmask = 4
minBootstrapPeers = 3
discoveryParallelism = 10
discoveryPeerLimit = 1000
bootstrapParallelism = 10
defaultLowWatermarkConnections = 160
defaultHighWatermarkConnections = 192
defaultMinBootstrapPeers = 3
defaultBootstrapParallelism = 10
defaultDiscoveryParallelism = 50
defaultDiscoveryPeerLookupLimit = 1000
defaultPingTimeout = 5 * time.Second
defaultPingPeriod = 30 * time.Second
defaultPingAttempts = 3
)
type BlossomSub struct {
@ -175,6 +184,7 @@ func NewBlossomSub(
logger *zap.Logger,
) *BlossomSub {
ctx := context.Background()
p2pConfig = withDefaults(p2pConfig)
opts := []libp2pconfig.Option{
libp2p.ListenAddrStrings(p2pConfig.ListenMultiaddr),
@ -258,11 +268,11 @@ func NewBlossomSub(
}
allowedPeers = append(allowedPeers, directPeers...)
if p2pConfig.LowWatermarkConnections != 0 &&
p2pConfig.HighWatermarkConnections != 0 {
if p2pConfig.LowWatermarkConnections != -1 &&
p2pConfig.HighWatermarkConnections != -1 {
cm, err := connmgr.NewConnManager(
int(p2pConfig.LowWatermarkConnections),
int(p2pConfig.HighWatermarkConnections),
p2pConfig.LowWatermarkConnections,
p2pConfig.HighWatermarkConnections,
connmgr.WithEmergencyTrim(true),
)
if err != nil {
@ -319,17 +329,15 @@ func NewBlossomSub(
util.Advertise(ctx, routingDiscovery, getNetworkNamespace(p2pConfig.Network))
verifyReachability(p2pConfig)
minBootstraps := minBootstrapPeers
if p2pConfig.Network != 0 {
minBootstraps = 1
}
minBootstrapPeers := min(len(bootstrappers), p2pConfig.MinBootstrapPeers)
bootstrap := internal.NewPeerConnector(
ctx,
logger.Named("bootstrap"),
h,
idService,
minBootstraps,
bootstrapParallelism,
minBootstrapPeers,
p2pConfig.BootstrapParallelism,
internal.NewStaticPeerSource(bootstrappers, true),
)
if err := bootstrap.Connect(ctx); err != nil {
@ -339,7 +347,7 @@ func NewBlossomSub(
ctx,
internal.NewNotEnoughPeersCondition(
h,
minBootstraps,
minBootstrapPeers,
internal.PeerAddrInfosToPeerIDMap(bootstrappers),
),
bootstrap,
@ -350,12 +358,12 @@ func NewBlossomSub(
logger.Named("discovery"),
h,
idService,
minPeersPerBitmask,
discoveryParallelism,
p2pConfig.D,
p2pConfig.DiscoveryParallelism,
internal.NewRoutingDiscoveryPeerSource(
routingDiscovery,
getNetworkNamespace(p2pConfig.Network),
discoveryPeerLimit,
p2pConfig.DiscoveryPeerLookupLimit,
),
)
if err := discovery.Connect(ctx); err != nil {
@ -364,7 +372,14 @@ func NewBlossomSub(
discovery = internal.NewChainedPeerConnector(ctx, bootstrap, discovery)
bs.discovery = discovery
go monitorPeers(ctx, logger, h)
internal.MonitorPeers(
ctx,
logger.Named("peer-monitor"),
h,
p2pConfig.PingTimeout,
p2pConfig.PingPeriod,
p2pConfig.PingAttempts,
)
// TODO: turn into an option flag for console logging; this is too noisy for
// the default logging behavior.
@ -416,9 +431,25 @@ func NewBlossomSub(
GraylistThreshold: -10000,
AcceptPXThreshold: 1,
OpportunisticGraftThreshold: 2,
}))
},
))
blossomOpts = append(blossomOpts,
blossomsub.WithValidateQueueSize(p2pConfig.ValidateQueueSize),
blossomsub.WithValidateWorkers(p2pConfig.ValidateWorkers),
)
blossomOpts = append(blossomOpts, observability.WithPrometheusRawTracer())
blossomOpts = append(blossomOpts, blossomsub.WithPeerFilter(internal.NewStaticPeerFilter(
// We explicitly filter the bootstrap peers out of BlossomSub,
// as they no longer subscribe to the relevant topics.
// However, the beacon is usually one of the bootstrap peers,
// so it gets special treatment: it is the only bootstrap peer
// that is still engaged in the network.
[]peer.ID{internal.BeaconPeerID(uint(p2pConfig.Network))},
internal.PeerAddrInfosToPeerIDSlice(bootstrappers),
true,
)))
params := mergeDefaults(p2pConfig)
params := toBlossomSubParams(p2pConfig)
rt := blossomsub.NewBlossomSubRouter(h, params, bs.network)
blossomOpts = append(blossomOpts, rt.WithDefaultTagTracer())
pubsub, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
@ -432,22 +463,11 @@ func NewBlossomSub(
bs.h = h
bs.signKey = privKey
allowedPeerIDs := make(map[peer.ID]struct{}, len(allowedPeers))
for _, peerInfo := range allowedPeers {
allowedPeerIDs[peerInfo.ID] = struct{}{}
}
go func() {
for {
time.Sleep(30 * time.Second)
for _, b := range bs.bitmaskMap {
bitmaskPeers := b.ListPeers()
peerCount := len(bitmaskPeers)
for _, p := range bitmaskPeers {
if _, ok := allowedPeerIDs[p]; ok {
peerCount--
}
}
if peerCount < minPeersPerBitmask {
for _, mask := range pubsub.GetBitmasks() {
if !rt.EnoughPeers([]byte(mask), 0) {
_ = discovery.Connect(ctx)
break
}
@ -460,7 +480,7 @@ func NewBlossomSub(
// adjusted from Lotus' reference implementation, addressing
// https://github.com/libp2p/go-libp2p/issues/1640
func resourceManager(highWatermark uint, allowed []peer.AddrInfo) (
func resourceManager(highWatermark int, allowed []peer.AddrInfo) (
network.ResourceManager,
error,
) {
@ -625,7 +645,7 @@ func (b *BlossomSub) Unsubscribe(bitmask []byte, raw bool) {
}
func (b *BlossomSub) RegisterValidator(
bitmask []byte, validator func(peerID peer.ID, message *pb.Message) ValidationResult,
bitmask []byte, validator func(peerID peer.ID, message *pb.Message) ValidationResult, sync bool,
) error {
validatorEx := func(
ctx context.Context, peerID peer.ID, message *blossomsub.Message,
@ -642,7 +662,7 @@ func (b *BlossomSub) RegisterValidator(
}
}
var _ blossomsub.ValidatorEx = validatorEx
return b.ps.RegisterBitmaskValidator(bitmask, validatorEx)
return b.ps.RegisterBitmaskValidator(bitmask, validatorEx, blossomsub.WithValidatorInline(sync))
}
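Design note: the new trailing sync parameter maps one-to-one onto blossomsub.WithValidatorInline, so passing true runs the validator inline rather than through the asynchronous validation pipeline sized by ValidateQueueSize and ValidateWorkers above.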
func (b *BlossomSub) UnregisterValidator(bitmask []byte) error {
@ -671,70 +691,6 @@ func (b *BlossomSub) GetRandomPeer(bitmask []byte) ([]byte, error) {
return []byte(peers[sel.Int64()]), nil
}
// monitorPeers periodically looks up the peers connected to the host and pings them
// up to 3 times to ensure they are still reachable. If the peer is not reachable after
// 3 attempts, the connections to the peer are closed.
func monitorPeers(ctx context.Context, logger *zap.Logger, h host.Host) {
const timeout, period, attempts = 20 * time.Second, time.Minute, 3
// Do not allow the pings to dial new connections. Adding new peers is a separate
// process and should not be done during the ping process.
ctx = network.WithNoDial(ctx, "monitor peers")
pingOnce := func(ctx context.Context, logger *zap.Logger, id peer.ID) bool {
pingCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
select {
case <-ctx.Done():
case <-pingCtx.Done():
logger.Debug("ping timeout")
return false
case res := <-ping.Ping(pingCtx, h, id):
if res.Error != nil {
logger.Debug("ping error", zap.Error(res.Error))
return false
}
logger.Debug("ping success", zap.Duration("rtt", res.RTT))
}
return true
}
ping := func(ctx context.Context, logger *zap.Logger, wg *sync.WaitGroup, id peer.ID) {
defer wg.Done()
var conns []network.Conn
for i := 0; i < attempts; i++ {
// There are no fine grained semantics in libp2p that would allow us to 'ping via
// a specific connection'. We can only ping a peer, which will attempt to open a stream via a connection.
// As such, we save a snapshot of the connections that were potentially in use before
// the ping, and close them if the ping fails. If new connections occur between the snapshot
// and the ping, they will not be closed, and will be pinged in the next iteration.
conns = h.Network().ConnsToPeer(id)
if pingOnce(ctx, logger, id) {
return
}
}
for _, conn := range conns {
_ = conn.Close()
}
}
for {
select {
case <-ctx.Done():
return
case <-time.After(period):
// This is once again a snapshot of the connected peers at the time of the ping. If new peers
// are added between the snapshot and the ping, they will be pinged in the next iteration.
peers := h.Network().Peers()
logger.Debug("pinging connected peers", zap.Int("peer_count", len(peers)))
wg := &sync.WaitGroup{}
for _, id := range peers {
logger := logger.With(zap.String("peer_id", id.String()))
wg.Add(1)
go ping(ctx, logger, wg, id)
}
wg.Wait()
logger.Debug("pinged connected peers")
}
}
}
func initDHT(
ctx context.Context,
logger *zap.Logger,
@ -906,7 +862,7 @@ func (b *BlossomSub) GetDirectChannel(key []byte, purpose string) (
// Open question: should we prefix this so a node can run in both mainnet and
// testnet? Feels like a bad idea; preferable to discourage it.
dialCtx, err = grpc.DialContext(
dialCtx, err = qgrpc.DialContext(
b.ctx,
base58.Encode(key),
grpc.WithDialer(
@ -1045,7 +1001,9 @@ func verifyReachability(cfg *config.P2PConfig) bool {
return true
}
func mergeDefaults(p2pConfig *config.P2PConfig) blossomsub.BlossomSubParams {
func withDefaults(p2pConfig *config.P2PConfig) *config.P2PConfig {
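// Work on a shallow copy so the caller's config is never mutated by the
// defaulting below.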
cfg := *p2pConfig
p2pConfig = &cfg
if p2pConfig.D == 0 {
p2pConfig.D = blossomsub.BlossomSubD
}
@ -1127,7 +1085,43 @@ func mergeDefaults(p2pConfig *config.P2PConfig) blossomsub.BlossomSubParams {
if p2pConfig.IWantFollowupTime == 0 {
p2pConfig.IWantFollowupTime = blossomsub.BlossomSubIWantFollowupTime
}
if p2pConfig.LowWatermarkConnections == 0 {
p2pConfig.LowWatermarkConnections = defaultLowWatermarkConnections
}
if p2pConfig.HighWatermarkConnections == 0 {
p2pConfig.HighWatermarkConnections = defaultHighWatermarkConnections
}
if p2pConfig.MinBootstrapPeers == 0 {
p2pConfig.MinBootstrapPeers = defaultMinBootstrapPeers
}
if p2pConfig.BootstrapParallelism == 0 {
p2pConfig.BootstrapParallelism = defaultBootstrapParallelism
}
if p2pConfig.DiscoveryParallelism == 0 {
p2pConfig.DiscoveryParallelism = defaultDiscoveryParallelism
}
if p2pConfig.DiscoveryPeerLookupLimit == 0 {
p2pConfig.DiscoveryPeerLookupLimit = defaultDiscoveryPeerLookupLimit
}
if p2pConfig.PingTimeout == 0 {
p2pConfig.PingTimeout = defaultPingTimeout
}
if p2pConfig.PingPeriod == 0 {
p2pConfig.PingPeriod = defaultPingPeriod
}
if p2pConfig.PingAttempts == 0 {
p2pConfig.PingAttempts = defaultPingAttempts
}
if p2pConfig.ValidateQueueSize == 0 {
p2pConfig.ValidateQueueSize = blossomsub.DefaultValidateQueueSize
}
if p2pConfig.ValidateWorkers == 0 {
p2pConfig.ValidateWorkers = runtime.NumCPU()
}
return p2pConfig
}
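Note the interplay with the watermark check in NewBlossomSub above: zero values are rewritten here to the 160/192 defaults, so the explicit -1/-1 sentinel is now the only way to disable the connection manager. A minimal sketch (the field types are plain ints, per the connmgr.NewConnManager call above):

package main

import "source.quilibrium.com/quilibrium/monorepo/node/config"

func main() {
	// -1/-1 disables the connection manager entirely; 0/0 would be
	// rewritten to the 160/192 defaults by withDefaults.
	_ = &config.P2PConfig{
		LowWatermarkConnections:  -1,
		HighWatermarkConnections: -1,
	}
}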
func toBlossomSubParams(p2pConfig *config.P2PConfig) blossomsub.BlossomSubParams {
return blossomsub.BlossomSubParams{
D: p2pConfig.D,
Dlo: p2pConfig.DLo,

View File

@ -0,0 +1,24 @@
package internal
import (
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"source.quilibrium.com/quilibrium/monorepo/node/config"
)
// BeaconPeerID returns the peer ID of the beacon node.
func BeaconPeerID(network uint) peer.ID {
genesis, err := config.DownloadAndVerifyGenesis(network)
if err != nil {
panic(err)
}
pub, err := crypto.UnmarshalEd448PublicKey(genesis.Beacon)
if err != nil {
panic(err)
}
peerID, err := peer.IDFromPublicKey(pub)
if err != nil {
panic(err)
}
return peerID
}

View File

@ -0,0 +1,31 @@
package internal
import (
"github.com/libp2p/go-libp2p/core/peer"
blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)
// NewStaticPeerFilter creates a new static peer filter.
// The allowList is a list of peers that are allowed to mesh.
// The blockList is a list of peers that are blocked from meshing.
// The def is the default value for peers that are not in the allowList or blockList.
// The allowList has priority over the blockList.
func NewStaticPeerFilter(allowList, blockList []peer.ID, def bool) blossomsub.PeerFilter {
allowed := make(map[peer.ID]struct{})
for _, p := range allowList {
allowed[p] = struct{}{}
}
blocked := make(map[peer.ID]struct{})
for _, p := range blockList {
blocked[p] = struct{}{}
}
return func(peerID peer.ID, _ []byte) bool {
if _, ok := allowed[peerID]; ok {
return true
}
if _, ok := blocked[peerID]; ok {
return false
}
return def
}
}
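NewBlossomSub above uses this filter to keep the beacon while dropping the remaining bootstrap peers from the mesh; a sketch of that wiring, where bootstrappers is a hypothetical []peer.AddrInfo:

package internal

import (
	"github.com/libp2p/go-libp2p/core/peer"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// exampleFilter mirrors the NewBlossomSub call site: the beacon is always
// allowed to mesh, the other bootstrap peers are blocked, and everyone
// else falls through to the default (allowed).
func exampleFilter(bootstrappers []peer.AddrInfo) blossomsub.PeerFilter {
	return NewStaticPeerFilter(
		[]peer.ID{BeaconPeerID(0)},
		PeerAddrInfosToPeerIDSlice(bootstrappers),
		true,
	)
}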

View File

@ -0,0 +1,97 @@
package internal
import (
"context"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"go.uber.org/zap"
)
type peerMonitor struct {
h host.Host
timeout time.Duration
period time.Duration
attempts int
}
func (pm *peerMonitor) pingOnce(ctx context.Context, logger *zap.Logger, id peer.ID) bool {
pingCtx, cancel := context.WithTimeout(ctx, pm.timeout)
defer cancel()
select {
case <-ctx.Done():
case <-pingCtx.Done():
logger.Debug("ping timeout")
return false
case res := <-ping.Ping(pingCtx, pm.h, id):
if res.Error != nil {
logger.Debug("ping error", zap.Error(res.Error))
return false
}
logger.Debug("ping success", zap.Duration("rtt", res.RTT))
}
return true
}
func (pm *peerMonitor) ping(ctx context.Context, logger *zap.Logger, wg *sync.WaitGroup, id peer.ID) {
defer wg.Done()
var conns []network.Conn
for i := 0; i < pm.attempts; i++ {
// There are no fine-grained semantics in libp2p that would allow us to 'ping via
// a specific connection'. We can only ping a peer, which will attempt to open a stream via a connection.
// As such, we save a snapshot of the connections that were potentially in use before
// the ping, and close them if the ping fails. If new connections occur between the snapshot
// and the ping, they will not be closed, and will be pinged in the next iteration.
conns = pm.h.Network().ConnsToPeer(id)
if pm.pingOnce(ctx, logger, id) {
return
}
}
for _, conn := range conns {
_ = conn.Close()
}
}
func (pm *peerMonitor) run(ctx context.Context, logger *zap.Logger) {
// Do not allow the pings to dial new connections. Adding new peers is a separate
// process and should not be done during the ping process.
ctx = network.WithNoDial(ctx, "monitor peers")
for {
select {
case <-ctx.Done():
return
case <-time.After(pm.period):
// This is once again a snapshot of the connected peers at the time of the ping. If new peers
// are added between the snapshot and the ping, they will be pinged in the next iteration.
peers := pm.h.Network().Peers()
logger.Debug("pinging connected peers", zap.Int("peer_count", len(peers)))
wg := &sync.WaitGroup{}
for _, id := range peers {
logger := logger.With(zap.String("peer_id", id.String()))
wg.Add(1)
go pm.ping(ctx, logger, wg, id)
}
wg.Wait()
logger.Debug("pinged connected peers")
}
}
}
// MonitorPeers periodically looks up the peers connected to the host and pings
// them to ensure they are still reachable. If a peer is not reachable after the
// configured number of attempts, its connections are closed.
func MonitorPeers(
ctx context.Context, logger *zap.Logger, h host.Host, timeout, period time.Duration, attempts int,
) {
pm := &peerMonitor{
h: h,
timeout: timeout,
period: period,
attempts: attempts,
}
go pm.run(ctx, logger)
}
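Usage mirrors the NewBlossomSub wiring above; with a zero-valued config, withDefaults supplies the 5s timeout, 30s period, and 3 attempts (ctx, logger, and h are the caller's context, logger, and host):

internal.MonitorPeers(ctx, logger.Named("peer-monitor"), h, 5*time.Second, 30*time.Second, 3)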

View File

@ -26,6 +26,7 @@ type PubSub interface {
RegisterValidator(
bitmask []byte,
validator func(peerID peer.ID, message *pb.Message) ValidationResult,
sync bool,
) error
UnregisterValidator(bitmask []byte) error
GetPeerID() []byte

View File

@ -944,9 +944,12 @@ type ChallengeProofRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
Core uint32 `protobuf:"varint,2,opt,name=core,proto3" json:"core,omitempty"`
ClockFrame *ClockFrame `protobuf:"bytes,3,opt,name=clock_frame,json=clockFrame,proto3" json:"clock_frame,omitempty"`
PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
Core uint32 `protobuf:"varint,2,opt,name=core,proto3" json:"core,omitempty"`
ClockFrame *ClockFrame `protobuf:"bytes,3,opt,name=clock_frame,json=clockFrame,proto3" json:"clock_frame,omitempty"`
Output []byte `protobuf:"bytes,4,opt,name=output,proto3" json:"output,omitempty"`
FrameNumber uint64 `protobuf:"varint,5,opt,name=frame_number,json=frameNumber,proto3" json:"frame_number,omitempty"`
Difficulty uint32 `protobuf:"varint,6,opt,name=difficulty,proto3" json:"difficulty,omitempty"`
}
func (x *ChallengeProofRequest) Reset() {
@ -1002,6 +1005,27 @@ func (x *ChallengeProofRequest) GetClockFrame() *ClockFrame {
return nil
}
func (x *ChallengeProofRequest) GetOutput() []byte {
if x != nil {
return x.Output
}
return nil
}
func (x *ChallengeProofRequest) GetFrameNumber() uint64 {
if x != nil {
return x.FrameNumber
}
return 0
}
func (x *ChallengeProofRequest) GetDifficulty() uint32 {
if x != nil {
return x.Difficulty
}
return 0
}
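A hedged construction sketch for the extended request; all field values are illustrative, and peerId/output are hypothetical variables:

req := &protobufs.ChallengeProofRequest{
	PeerId:      peerId, // hypothetical []byte
	Core:        2,
	Output:      output, // hypothetical []byte
	FrameNumber: 1024,
	Difficulty:  200000,
}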
type ChallengeProofResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -1193,7 +1217,7 @@ var file_data_proto_rawDesc = []byte{
0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52,
0x0b, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06,
0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x61,
0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x8b, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e,
0x6e, 0x64, 0x6f, 0x6d, 0x22, 0xe6, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e,
0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17,
0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18,
@ -1202,71 +1226,77 @@ var file_data_proto_rawDesc = []byte{
0x32, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f,
0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x63,
0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x0a, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61,
0x6d, 0x65, 0x22, 0x30, 0x0a, 0x16, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50,
0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06,
0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75,
0x74, 0x70, 0x75, 0x74, 0x32, 0xff, 0x05, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x76, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x72,
0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12,
0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x63, 0x6b,
0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70,
0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x30, 0x01, 0x12, 0x9a, 0x01, 0x0a,
0x1d, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65,
0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x39,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d,
0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72,
0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04,
0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a,
0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x22, 0x30, 0x0a,
0x16, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75,
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x32,
0xff, 0x05, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x76, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64,
0x53, 0x79, 0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69,
0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x63, 0x6b, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x72, 0x61, 0x6d, 0x65,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65,
0x64, 0x53, 0x79, 0x6e, 0x63, 0x30, 0x01, 0x12, 0x9a, 0x01, 0x0a, 0x1d, 0x4e, 0x65, 0x67, 0x6f,
0x74, 0x69, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53,
0x79, 0x6e, 0x63, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x39, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61,
0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73,
0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x76, 0x0a, 0x10, 0x47, 0x65, 0x74,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x2e, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x32, 0x50, 0x43, 0x68,
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x1a, 0x2e, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x32, 0x50, 0x43, 0x68,
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x28, 0x01, 0x30,
0x01, 0x12, 0x68, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d,
0x65, 0x12, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44,
0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x2a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72,
0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x15, 0x48,
0x61, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74,
0x4d, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d,
0x69, 0x6e, 0x74, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e,
0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x83, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69,
0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69,
0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50,
0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8c, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49,
0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x17, 0x43, 0x61, 0x6c,
0x63, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50,
0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x43,
0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x43,
0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65,
0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x3a, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44,
0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x79, 0x6e,
0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x28, 0x01, 0x30, 0x01, 0x12, 0x76, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69,
0x63, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e,
0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x32, 0x50, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x1a, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e,
0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x32, 0x50, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x0c,
0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x2e, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64,
0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72,
0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x71, 0x75, 0x69,
0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74,
0x61, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x15, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65,
0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x12,
0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x69, 0x6e, 0x74, 0x43, 0x6f,
0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61,
0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d,
0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x18,
0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69,
0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64, 0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69,
0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x65, 0x4d, 0x69, 0x64,
0x6e, 0x69, 0x67, 0x68, 0x74, 0x4d, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x32, 0x8c, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x17, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74,
0x65, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12,
0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65,
0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65,
0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f,
0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -113,6 +113,9 @@ message ChallengeProofRequest {
bytes peer_id = 1;
uint32 core = 2;
quilibrium.node.clock.pb.ClockFrame clock_frame = 3;
bytes output = 4;
uint64 frame_number = 5;
uint32 difficulty = 6;
}
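
As a reading aid, a minimal sketch of the two request shapes the Go handler further below accepts, using the generated bindings; the helper name and all surrounding values (peerID, frame, and so on) are assumptions:

func buildChallengeProofRequests(
    peerID []byte,
    core uint32,
    frame *protobufs.ClockFrame,
    output []byte,
    frameNumber uint64,
    difficulty uint32,
) (*protobufs.ChallengeProofRequest, *protobufs.ChallengeProofRequest) {
    // Shape 1: carry the full clock frame; the worker derives the
    // difficulty and frame number from the frame itself.
    withFrame := &protobufs.ChallengeProofRequest{
        PeerId:     peerID,
        Core:       core,
        ClockFrame: frame,
    }
    // Shape 2: carry the raw output; frame_number and difficulty must
    // be set explicitly and non-zero or the worker rejects the request.
    withOutput := &protobufs.ChallengeProofRequest{
        PeerId:      peerID,
        Core:        core,
        Output:      output,
        FrameNumber: frameNumber,
        Difficulty:  difficulty,
    }
    return withFrame, withOutput
}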
message ChallengeProofResponse {

View File

@ -29,6 +29,7 @@ func (t *MintCoinRequest) RingAndParallelism(
if err := t.Signature.Verify(payload); err != nil {
return -1, 0, errors.New("invalid")
}
pk, err := pcrypto.UnmarshalEd448PublicKey(
t.Signature.PublicKey.KeyValue,
)

File diff suppressed because it is too large Load Diff

View File

@ -63,6 +63,7 @@ message NodeInfoResponse {
bytes version = 4;
bytes peer_seniority = 5;
int32 prover_ring = 6;
uint32 workers = 7;
}
message PutPeerInfoRequest {
@ -245,6 +246,7 @@ message TokenRequest {
AnnounceProverPause pause = 8;
AnnounceProverResume resume = 9;
}
int64 timestamp = 10;
}
message TokenRequests {
@ -635,12 +637,14 @@ message SendMessageResponse{}
message GetTokensByAccountRequest {
bytes address = 1;
bool include_metadata = 2;
}
message TokensByAccountResponse {
repeated Coin coins = 1;
repeated uint64 frame_numbers = 2;
repeated bytes addresses = 3;
repeated int64 timestamps = 4;
}
message GetPreCoinProofsByAccountRequest {

View File

@ -9,18 +9,18 @@ import (
"syscall"
"time"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
@ -41,16 +41,42 @@ func (r *DataWorkerIPCServer) CalculateChallengeProof(
) (*protobufs.ChallengeProofResponse, error) {
challenge := []byte{}
challenge = append(challenge, req.PeerId...)
challenge = binary.BigEndian.AppendUint64(
challenge,
req.ClockFrame.FrameNumber,
)
challenge = binary.BigEndian.AppendUint32(challenge, req.Core)
challenge = append(challenge, req.ClockFrame.Output...)
difficulty := req.Difficulty
frameNumber := req.FrameNumber
if req.ClockFrame != nil {
challenge = binary.BigEndian.AppendUint64(
challenge,
req.ClockFrame.FrameNumber,
)
challenge = binary.BigEndian.AppendUint32(challenge, req.Core)
challenge = append(challenge, req.ClockFrame.Output...)
difficulty = req.ClockFrame.Difficulty
frameNumber = req.ClockFrame.FrameNumber
} else if req.Output != nil {
challenge = binary.BigEndian.AppendUint64(
challenge,
frameNumber,
)
challenge = binary.BigEndian.AppendUint32(challenge, req.Core)
challenge = append(challenge, req.Output...)
} else {
return nil, errors.Wrap(
errors.New("invalid request"),
"calculate challenge proof",
)
}
if difficulty == 0 || frameNumber == 0 {
return nil, errors.Wrap(
errors.New("invalid request"),
"calculate challenge proof",
)
}
proof, err := r.prover.CalculateChallengeProof(
challenge,
req.ClockFrame.Difficulty,
difficulty,
)
if err != nil {
return nil, errors.Wrap(err, "calculate challenge proof")
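
For reference, the challenge layout assembled above, factored into a hypothetical helper (assumes the file's existing encoding/binary import; the name buildChallenge is illustrative):

func buildChallenge(
    peerID []byte,
    frameNumber uint64,
    core uint32,
    output []byte,
) []byte {
    // Layout: peerId || frameNumber (8 bytes BE) || core (4 bytes BE) || output.
    challenge := append([]byte{}, peerID...)
    challenge = binary.BigEndian.AppendUint64(challenge, frameNumber)
    challenge = binary.BigEndian.AppendUint32(challenge, core)
    return append(challenge, output...)
}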
@ -109,7 +135,7 @@ func NewDataWorkerIPCServer(
}
func (r *DataWorkerIPCServer) Start() error {
s := grpc.NewServer(
s := qgrpc.NewServer(
grpc.MaxRecvMsgSize(600*1024*1024),
grpc.MaxSendMsgSize(600*1024*1024),
)

View File

@ -6,14 +6,11 @@ import (
"math/big"
"net/http"
"strings"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/peer"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
@ -23,8 +20,11 @@ import (
"google.golang.org/grpc/reflection"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -184,6 +184,7 @@ func (r *RPCServer) GetNodeInfo(
),
PeerSeniority: seniority.FillBytes(make([]byte, 32)),
ProverRing: int32(ring),
Workers: r.executionEngines[0].GetWorkerCount(),
}, nil
}
@ -218,6 +219,8 @@ func (r *RPCServer) SendMessage(
ctx context.Context,
req *protobufs.TokenRequest,
) (*protobufs.SendMessageResponse, error) {
req.Timestamp = time.Now().UnixMilli()
any := &anypb.Any{}
if err := any.MarshalFrom(req); err != nil {
return nil, errors.Wrap(err, "publish message")
@ -269,10 +272,30 @@ func (r *RPCServer) GetTokensByAccount(
return nil, err
}
timestamps := []int64{}
if req.IncludeMetadata {
tcache := map[uint64]int64{}
intrinsicFilter := p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3)
for _, frame := range frameNumbers {
if t, ok := tcache[frame]; ok {
timestamps = append(timestamps, t)
continue
}
f, _, err := r.clockStore.GetDataClockFrame(intrinsicFilter, frame, true)
if err != nil {
return nil, err
}
tcache[frame] = f.Timestamp
timestamps = append(timestamps, f.Timestamp)
}
}
return &protobufs.TokensByAccountResponse{
Coins: coins,
FrameNumbers: frameNumbers,
Addresses: addresses,
Timestamps: timestamps,
}, nil
}
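
On the consuming side, the four response slices are index-aligned, and Timestamps is only populated when include_metadata was set; a minimal client sketch, assuming a connected NodeService client and an addr value:

resp, err := client.GetTokensByAccount(
    ctx,
    &protobufs.GetTokensByAccountRequest{
        Address:         addr,
        IncludeMetadata: true,
    },
)
if err != nil {
    return err
}
for i := range resp.Coins {
    frame := resp.FrameNumbers[i]
    if len(resp.Timestamps) > 0 {
        // Timestamps are Unix milliseconds taken from the containing
        // clock frame.
        _ = time.UnixMilli(resp.Timestamps[i])
    }
    _ = frame
}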
@ -376,7 +399,7 @@ func NewRPCServer(
}
func (r *RPCServer) Start() error {
s := grpc.NewServer(
s := qgrpc.NewServer(
grpc.MaxRecvMsgSize(600*1024*1024),
grpc.MaxSendMsgSize(600*1024*1024),
)
@ -417,13 +440,13 @@ func (r *RPCServer) Start() error {
go func() {
mux := runtime.NewServeMux()
opts := []grpc.DialOption{
opts := qgrpc.ClientOptions(
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(600*1024*1024),
grpc.MaxCallSendMsgSize(600*1024*1024),
),
}
)
if err := protobufs.RegisterNodeServiceHandlerFromEndpoint(
context.Background(),

View File

@ -18,7 +18,7 @@ import (
)
type ClockStore interface {
NewTransaction() (Transaction, error)
NewTransaction(indexed bool) (Transaction, error)
GetLatestMasterClockFrame(filter []byte) (*protobufs.ClockFrame, error)
GetEarliestMasterClockFrame(filter []byte) (*protobufs.ClockFrame, error)
GetMasterClockFrame(
@ -98,6 +98,11 @@ type ClockStore interface {
frame *protobufs.ClockFrame,
tries []*tries.RollingFrecencyCritbitTrie,
) error
DeleteDataClockFrameRange(
filter []byte,
minFrameNumber uint64,
maxFrameNumber uint64,
) error
}
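
A hedged sketch of the new interface surface: DeleteDataClockFrameRange prunes a half-open frame span (the implementation below iterates from the lower bound up to, but not including, the upper bound), and NewTransaction now takes an indexed flag; the helper name is hypothetical:

func pruneFrames(cs ClockStore, filter []byte, from, to uint64) error {
    // Drops frames in [from, to), including their parent indexes and
    // aggregate proofs, in a single write-only transaction.
    return cs.DeleteDataClockFrameRange(filter, from, to)
}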
type PebbleClockStore struct {
@ -448,8 +453,8 @@ func clockDataSeniorityKey(
return key
}
func (p *PebbleClockStore) NewTransaction() (Transaction, error) {
return p.db.NewBatch(), nil
func (p *PebbleClockStore) NewTransaction(indexed bool) (Transaction, error) {
return p.db.NewBatch(indexed), nil
}
// GetEarliestMasterClockFrame implements ClockStore.
@ -714,6 +719,26 @@ func (p *PebbleClockStore) fillAggregateProofs(
return nil
}
func (p *PebbleClockStore) deleteAggregateProofs(
txn Transaction,
frame *protobufs.ClockFrame,
) error {
for i := 0; i < len(frame.Input[516:])/74; i++ {
commit := frame.Input[516+(i*74) : 516+((i+1)*74)]
err := internalDeleteAggregateProof(
p.db,
txn,
frame.AggregateProofs[i],
commit,
)
if err != nil {
return errors.Wrap(err, "delete aggregate proofs")
}
}
return nil
}
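
The 516 and 74 constants above imply a fixed layout for frame.Input; a small illustration of the arithmetic (the constants are taken from the code, the layout description is an inference):

// frame.Input: a 516-byte header followed by consecutive 74-byte
// commitments; commitment i sits at Input[516+74*i : 516+74*(i+1)].
func frameCommitments(input []byte) [][]byte {
    n := len(input[516:]) / 74
    commits := make([][]byte, 0, n)
    for i := 0; i < n; i++ {
        commits = append(commits, input[516+(i*74):516+((i+1)*74)])
    }
    return commits
}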
func (p *PebbleClockStore) saveAggregateProofs(
txn Transaction,
frame *protobufs.ClockFrame,
@ -721,7 +746,7 @@ func (p *PebbleClockStore) saveAggregateProofs(
shouldClose := false
if txn == nil {
var err error
txn, err = p.NewTransaction()
txn, err = p.NewTransaction(false)
if err != nil {
return err
}
@ -1025,16 +1050,46 @@ func (p *PebbleClockStore) DeleteDataClockFrameRange(
fromFrameNumber uint64,
toFrameNumber uint64,
) error {
err := p.db.DeleteRange(
clockDataFrameKey(
filter,
fromFrameNumber,
),
clockDataFrameKey(
filter,
toFrameNumber,
),
)
txn, err := p.NewTransaction(false)
if err != nil {
return errors.Wrap(err, "delete data clock frame range")
}
for i := fromFrameNumber; i < toFrameNumber; i++ {
frames, err := p.GetStagedDataClockFramesForFrameNumber(filter, i)
if err != nil {
return errors.Wrap(err, "delete data clock frame range")
}
for _, frame := range frames {
err = p.deleteAggregateProofs(txn, frame)
if err != nil {
txn.Abort()
return errors.Wrap(err, "delete data clock frame range")
}
}
err = txn.DeleteRange(
clockDataParentIndexKey(filter, i, bytes.Repeat([]byte{0x00}, 32)),
clockDataParentIndexKey(filter, i, bytes.Repeat([]byte{0xff}, 32)),
)
if err != nil {
txn.Abort()
return errors.Wrap(err, "delete data clock frame range")
}
err = txn.Delete(clockDataFrameKey(filter, i))
if err != nil {
txn.Abort()
return errors.Wrap(err, "delete data clock frame range")
}
}
if err = txn.Commit(); err != nil {
txn.Abort()
return errors.Wrap(err, "delete data clock frame range")
}
return nil
}

View File

@ -14,7 +14,7 @@ import (
)
type CoinStore interface {
NewTransaction() (Transaction, error)
NewTransaction(indexed bool) (Transaction, error)
GetCoinsForOwner(owner []byte) ([]uint64, [][]byte, []*protobufs.Coin, error)
GetPreCoinProofsForOwner(owner []byte) (
[]uint64,
@ -117,8 +117,8 @@ func genesisSeedKey() []byte {
return []byte{COIN, GENESIS}
}
func (p *PebbleCoinStore) NewTransaction() (Transaction, error) {
return p.db.NewBatch(), nil
func (p *PebbleCoinStore) NewTransaction(indexed bool) (Transaction, error) {
return p.db.NewBatch(indexed), nil
}
func (p *PebbleCoinStore) GetCoinsForOwner(
@ -415,7 +415,7 @@ func (p *PebbleCoinStore) SetMigrationVersion(
return errors.Wrap(err, "migrate")
}
txn, err := p.NewTransaction()
txn, err := p.NewTransaction(false)
if err != nil {
return errors.Wrap(err, "migrate")
}
@ -493,7 +493,7 @@ func (p *PebbleCoinStore) internalMigrate(
panic(err)
}
txn, err := p.NewTransaction()
txn, err := p.NewTransaction(false)
if err != nil {
return errors.Wrap(err, "migrate")
}
@ -537,7 +537,7 @@ func (p *PebbleCoinStore) Migrate(filter []byte, genesisSeedHex string) error {
return errors.Wrap(err, "migrate")
}
txn, err := p.NewTransaction()
txn, err := p.NewTransaction(false)
if err != nil {
return errors.Wrap(err, "migrate")
}

View File

@ -122,7 +122,7 @@ func dataTimeProofLatestKey(peerId []byte) []byte {
}
func (p *PebbleDataProofStore) NewTransaction() (Transaction, error) {
return p.db.NewBatch(), nil
return p.db.NewBatch(false), nil
}
func internalGetAggregateProof(
@ -325,6 +325,58 @@ func (p *PebbleDataProofStore) GetAggregateProof(
)
}
func internalDeleteAggregateProof(
db KVDB,
txn Transaction,
aggregateProof *protobufs.InclusionAggregateProof,
commitment []byte,
) error {
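// Mirrors internalPutAggregateProof: removes the per-segment keys, the
// per-commitment inclusion keys, and the metadata key for this commitment.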
for i, inc := range aggregateProof.InclusionCommitments {
var segments [][]byte
if inc.TypeUrl == protobufs.IntrinsicExecutionOutputType {
o := &protobufs.IntrinsicExecutionOutput{}
if err := proto.Unmarshal(inc.Data, o); err != nil {
return errors.Wrap(err, "delete aggregate proof")
}
leftBits := append([]byte{}, o.Address...)
leftBits = append(leftBits, o.Output...)
rightBits := o.Proof
segments = [][]byte{leftBits, rightBits}
} else {
segments = [][]byte{inc.Data}
}
for _, segment := range segments {
hash := sha3.Sum256(segment)
if err := txn.Delete(
dataProofSegmentKey(aggregateProof.Filter, hash[:]),
); err != nil {
return errors.Wrap(err, "delete aggregate proof")
}
}
if err := txn.Delete(
dataProofInclusionKey(aggregateProof.Filter, commitment, uint64(i)),
); err != nil {
return errors.Wrap(err, "delete aggregate proof")
}
}
if err := txn.Delete(
dataProofMetadataKey(aggregateProof.Filter, commitment),
); err != nil {
return errors.Wrap(err, "delete aggregate proof")
}
return nil
}
func internalPutAggregateProof(
db KVDB,
txn Transaction,

View File

@ -243,6 +243,7 @@ func (t *InMemKVDBTransaction) Delete(key []byte) error {
if !t.db.open {
return errors.New("inmem db closed")
}
t.changes = append(t.changes, InMemKVDBOperation{
op: DeleteOperation,
key: key,
@ -268,6 +269,32 @@ func (t *InMemKVDBTransaction) NewIter(lowerBound []byte, upperBound []byte) (
}, nil
}
func (t *InMemKVDBTransaction) DeleteRange(
lowerBound []byte,
upperBound []byte,
) error {
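// Note: this materializes the matching keys at call time and queues one
// DeleteOperation per existing key; it is not a ranged tombstone, so keys
// written into the range later in the same transaction are unaffected.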
if !t.db.open {
return errors.New("inmem db closed")
}
iter, err := t.NewIter(lowerBound, upperBound)
if err != nil {
return err
}
for iter.First(); iter.Valid(); iter.Next() {
t.changes = append(t.changes, InMemKVDBOperation{
op: DeleteOperation,
key: iter.Key(),
})
}
return nil
}
func (t *InMemKVDBTransaction) Abort() error {
return nil
}
@ -336,7 +363,7 @@ func (d *InMemKVDB) Delete(key []byte) error {
return nil
}
func (d *InMemKVDB) NewBatch() Transaction {
func (d *InMemKVDB) NewBatch(indexed bool) Transaction {
if !d.open {
return nil
}

View File

@ -217,7 +217,7 @@ func keyBundleEarliestKey(provingKey []byte) []byte {
}
func (p *PebbleKeyStore) NewTransaction() (Transaction, error) {
return p.db.NewBatch(), nil
return p.db.NewBatch(false), nil
}
// Stages a proving key for later inclusion on proof of meaningful work.

View File

@ -8,7 +8,7 @@ type KVDB interface {
Get(key []byte) ([]byte, io.Closer, error)
Set(key, value []byte) error
Delete(key []byte) error
NewBatch() Transaction
NewBatch(indexed bool) Transaction
NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
Compact(start, end []byte, parallelize bool) error
CompactAll() error

View File

@ -33,9 +33,15 @@ func (p *PebbleDB) Delete(key []byte) error {
return p.db.Delete(key, &pebble.WriteOptions{Sync: true})
}
func (p *PebbleDB) NewBatch() Transaction {
return &PebbleTransaction{
b: p.db.NewIndexedBatch(),
func (p *PebbleDB) NewBatch(indexed bool) Transaction {
if indexed {
return &PebbleTransaction{
b: p.db.NewIndexedBatch(),
}
} else {
return &PebbleTransaction{
b: p.db.NewBatch(),
}
}
}
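
The rationale for the flag, as a sketch: Pebble's NewIndexedBatch supports reads (Get, NewIter) that observe the batch's own pending writes, while a plain NewBatch is write-only and cheaper to maintain. The helper below is hypothetical:

func pickBatch(db KVDB, readYourWrites bool) Transaction {
    // Ask for an indexed batch only when the transaction must read its
    // own uncommitted writes; otherwise the plain batch avoids the
    // in-memory index maintenance.
    return db.NewBatch(readYourWrites)
}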
@ -94,6 +100,7 @@ type Transaction interface {
Delete(key []byte) error
Abort() error
NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
DeleteRange(lowerBound []byte, upperBound []byte) error
}
type PebbleTransaction struct {
@ -130,6 +137,17 @@ func (t *PebbleTransaction) NewIter(lowerBound []byte, upperBound []byte) (
})
}
func (t *PebbleTransaction) DeleteRange(
lowerBound []byte,
upperBound []byte,
) error {
return t.b.DeleteRange(
lowerBound,
upperBound,
&pebble.WriteOptions{Sync: true},
)
}
var _ Transaction = (*PebbleTransaction)(nil)
func rightAlign(data []byte, size int) []byte {

View File

@ -180,7 +180,7 @@ func (d *PeerstoreDatastore) Close() (err error) {
func (d *PeerstoreDatastore) Batch(ctx context.Context) (ds.Batch, error) {
return &batch{
b: &transaction{tx: d.db.NewBatch()},
b: &transaction{tx: d.db.NewBatch(false)},
db: d.db,
}, nil
}
@ -189,7 +189,7 @@ func (d *PeerstoreDatastore) NewTransaction(
ctx context.Context,
readOnly bool,
) (ds.Txn, error) {
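// Note: readOnly is currently ignored; a writable, non-indexed batch is
// returned either way.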
tx := d.db.NewBatch()
tx := d.db.NewBatch(false)
return &transaction{tx}, nil
}