From f5216924c0251f36a34008d6f6dc9daa2a6d6f8a Mon Sep 17 00:00:00 2001 From: Cassandra Heart Date: Tue, 9 Dec 2025 16:30:43 -0600 Subject: [PATCH] v2.1.0.15 --- config/config.go | 6 +- config/version.go | 2 +- go-libp2p/config/config.go | 44 +- go-libp2p/defaults.go | 25 - go-libp2p/options.go | 18 - .../p2p/net/swarm/black_hole_detector.go | 274 ------ .../p2p/net/swarm/black_hole_detector_test.go | 250 ------ go-libp2p/p2p/net/swarm/dial_worker.go | 8 +- go-libp2p/p2p/net/swarm/swarm.go | 47 -- go-libp2p/p2p/net/swarm/swarm_dial.go | 14 - go-libp2p/p2p/net/swarm/swarm_dial_test.go | 45 - go-libp2p/p2p/net/swarm/swarm_metrics.go | 40 - go-libp2p/p2p/net/swarm/swarm_metrics_test.go | 11 - .../p2p/protocol/autonatv2/autonat_test.go | 5 +- .../p2p/protocol/autonatv2/server_test.go | 3 +- hypergraph/hypergraph.go | 6 + hypergraph/id_set.go | 16 + hypergraph/snapshot_manager.go | 106 ++- hypergraph/sync.go | 789 ++++++++++++++++-- .../global/consensus_leader_provider.go | 16 +- .../global/consensus_liveness_provider.go | 84 +- .../global/global_consensus_engine.go | 146 +++- node/consensus/global/message_processors.go | 25 +- node/consensus/global/message_validation.go | 9 + node/consensus/provers/prover_registry.go | 48 ++ node/consensus/sync/sync_client.go | 40 +- .../intrinsics/global/global_prover_join.go | 28 +- .../intrinsics/global/global_prover_kick.go | 26 +- .../token/token_intrinsic_mint_transaction.go | 32 +- .../token/token_intrinsic_transaction.go | 2 +- node/p2p/blossomsub.go | 469 ++++++++--- node/rpc/hypergraph_sync_rpc_server_test.go | 105 ++- node/rpc/node_rpc_server.go | 1 + node/store/hypergraph.go | 244 +++++- node/store/pebble.go | 15 + node/tests/simnet.go | 18 +- protobufs/node.pb.go | 742 +++++++++------- protobufs/node.pb.gw.go | 139 +++ protobufs/node.proto | 16 + protobufs/node_grpc.pb.go | 90 ++ types/consensus/prover_registry.go | 11 + types/hypergraph/id_set.go | 7 + types/mocks/prover_registry.go | 9 + types/tries/lazy_proof_tree.go | 40 + 44 files changed, 2633 insertions(+), 1438 deletions(-) delete mode 100644 go-libp2p/p2p/net/swarm/black_hole_detector.go delete mode 100644 go-libp2p/p2p/net/swarm/black_hole_detector_test.go diff --git a/config/config.go b/config/config.go index 64f18d4..c0ec4ac 100644 --- a/config/config.go +++ b/config/config.go @@ -85,9 +85,9 @@ func NewConfig(configPath string) (*Config, error) { } var BootstrapPeers = []string{ - "/dns/bootstrap.quilibrium.com/udp/8336/quic-v1/p2p/Qme3g6rJWuz8HVXxpDb7aV2hiFq8bZJNqxMmwzmASzfq1M", - "/dns/quecifer.quilibrium.com/udp/8336/quic-v1/p2p/QmdWF9bGTH5mwJXkxrG859HA5r34MxXtMSTuEikSMDSESv", - "/dns/quagmire.quilibrium.com/udp/8336/quic-v1/p2p/QmaQ9KAaKtqXhYSQ5ARQNnn8B8474cWGvvD6PgJ4gAtMrx", + "/dnsaddr/quinoa.quilibrium.com/udp/8339/p2p/QmP9NNzAzRjCL8gdQBkKHwyBCWJGVb3jPrQzTveYdU24kH", + "/dnsaddr/qualia.quilibrium.com/udp/8339/p2p/QmRP1UPiDg1enHgN6wEL1Y4uUh1XKg7V3QExdBKV9BUUQf", + "/dnsaddr/quetzalcoatl.quilibrium.com/udp/8339/p2p/QmNq4xSqrxTKKtK7J6UFEa4unjsoULP2G4qWwwH5EKmoJj", // "/ip4/204.186.74.46/udp/8316/quic-v1/p2p/QmeqBjm3iX7sdTieyto1gys5ruQrQNPKfaTGcVQQWJPYDV", "/ip4/65.109.17.13/udp/8336/quic-v1/p2p/Qmc35n99eojSvW3PkbfBczJoSX92WmnnKh3Fg114ok3oo4", "/ip4/65.108.194.84/udp/8336/quic-v1/p2p/QmP8C7g9ZRiWzhqN2AgFu5onS6HwHzR6Vv1TCHxAhnCSnq", diff --git a/config/version.go b/config/version.go index 02aaac7..e22811e 100644 --- a/config/version.go +++ b/config/version.go @@ -43,7 +43,7 @@ func FormatVersion(version []byte) string { } func GetPatchNumber() byte { - return 0x0e + return 0x0f } func 
GetRCNumber() byte { diff --git a/go-libp2p/config/config.go b/go-libp2p/config/config.go index c2accf0..09427c7 100644 --- a/go-libp2p/config/config.go +++ b/go-libp2p/config/config.go @@ -145,11 +145,6 @@ type Config struct { EnableAutoNATv2 bool - UDPBlackHoleSuccessCounter *swarm.BlackHoleSuccessCounter - CustomUDPBlackHoleSuccessCounter bool - IPv6BlackHoleSuccessCounter *swarm.BlackHoleSuccessCounter - CustomIPv6BlackHoleSuccessCounter bool - UserFxOptions []fx.Option ShareTCPListener bool @@ -187,10 +182,7 @@ func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swa return nil, err } - opts := append(cfg.SwarmOpts, - swarm.WithUDPBlackHoleSuccessCounter(cfg.UDPBlackHoleSuccessCounter), - swarm.WithIPv6BlackHoleSuccessCounter(cfg.IPv6BlackHoleSuccessCounter), - ) + opts := cfg.SwarmOpts if cfg.Reporter != nil { opts = append(opts, swarm.WithMetrics(cfg.Reporter)) } @@ -229,23 +221,18 @@ func (cfg *Config) makeAutoNATV2Host() (host.Host, error) { } autoNatCfg := Config{ - Transports: cfg.Transports, - Muxers: cfg.Muxers, - SecurityTransports: cfg.SecurityTransports, - Insecure: cfg.Insecure, - PSK: cfg.PSK, - ConnectionGater: cfg.ConnectionGater, - Reporter: cfg.Reporter, - PeerKey: autonatPrivKey, - Peerstore: ps, - DialRanker: swarm.NoDelayDialRanker, - UDPBlackHoleSuccessCounter: cfg.UDPBlackHoleSuccessCounter, - IPv6BlackHoleSuccessCounter: cfg.IPv6BlackHoleSuccessCounter, - ResourceManager: cfg.ResourceManager, - SwarmOpts: []swarm.Option{ - // Don't update black hole state for failed autonat dials - swarm.WithReadOnlyBlackHoleDetector(), - }, + Transports: cfg.Transports, + Muxers: cfg.Muxers, + SecurityTransports: cfg.SecurityTransports, + Insecure: cfg.Insecure, + PSK: cfg.PSK, + ConnectionGater: cfg.ConnectionGater, + Reporter: cfg.Reporter, + PeerKey: autonatPrivKey, + Peerstore: ps, + DialRanker: swarm.NoDelayDialRanker, + ResourceManager: cfg.ResourceManager, + SwarmOpts: []swarm.Option{}, } fxopts, err := autoNatCfg.addTransports() if err != nil { @@ -715,10 +702,7 @@ func (cfg *Config) addAutoNAT(h *bhost.BasicHost) error { Peerstore: ps, DialRanker: swarm.NoDelayDialRanker, ResourceManager: cfg.ResourceManager, - SwarmOpts: []swarm.Option{ - swarm.WithUDPBlackHoleSuccessCounter(nil), - swarm.WithIPv6BlackHoleSuccessCounter(nil), - }, + SwarmOpts: []swarm.Option{}, } fxopts, err := autoNatCfg.addTransports() diff --git a/go-libp2p/defaults.go b/go-libp2p/defaults.go index 31de6f0..86ee8b2 100644 --- a/go-libp2p/defaults.go +++ b/go-libp2p/defaults.go @@ -10,7 +10,6 @@ import ( rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" "github.com/libp2p/go-libp2p/p2p/muxer/yamux" "github.com/libp2p/go-libp2p/p2p/net/connmgr" - "github.com/libp2p/go-libp2p/p2p/net/swarm" "github.com/libp2p/go-libp2p/p2p/security/noise" tls "github.com/libp2p/go-libp2p/p2p/security/tls" quic "github.com/libp2p/go-libp2p/p2p/transport/quic" @@ -132,18 +131,6 @@ var DefaultPrometheusRegisterer = func(cfg *Config) error { return cfg.Apply(PrometheusRegisterer(prometheus.DefaultRegisterer)) } -var defaultUDPBlackHoleDetector = func(cfg *Config) error { - // A black hole is a binary property. On a network if UDP dials are blocked, all dials will - // fail. So a low success rate of 5 out 100 dials is good enough. - return cfg.Apply(UDPBlackHoleSuccessCounter(&swarm.BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"})) -} - -var defaultIPv6BlackHoleDetector = func(cfg *Config) error { - // A black hole is a binary property. 
On a network if there is no IPv6 connectivity, all - // dials will fail. So a low success rate of 5 out 100 dials is good enough. - return cfg.Apply(IPv6BlackHoleSuccessCounter(&swarm.BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"})) -} - // Complete list of default options and when to fallback on them. // // Please *DON'T* specify default options any other way. Putting this all here @@ -196,18 +183,6 @@ var defaults = []struct { fallback: func(cfg *Config) bool { return !cfg.DisableMetrics && cfg.PrometheusRegisterer == nil }, opt: DefaultPrometheusRegisterer, }, - { - fallback: func(cfg *Config) bool { - return !cfg.CustomUDPBlackHoleSuccessCounter && cfg.UDPBlackHoleSuccessCounter == nil - }, - opt: defaultUDPBlackHoleDetector, - }, - { - fallback: func(cfg *Config) bool { - return !cfg.CustomIPv6BlackHoleSuccessCounter && cfg.IPv6BlackHoleSuccessCounter == nil - }, - opt: defaultIPv6BlackHoleDetector, - }, } // Defaults configures libp2p to use the default options. Can be combined with diff --git a/go-libp2p/options.go b/go-libp2p/options.go index 0329b7e..971a9fd 100644 --- a/go-libp2p/options.go +++ b/go-libp2p/options.go @@ -617,24 +617,6 @@ func EnableAutoNATv2() Option { } } -// UDPBlackHoleSuccessCounter configures libp2p to use f as the black hole filter for UDP addrs -func UDPBlackHoleSuccessCounter(f *swarm.BlackHoleSuccessCounter) Option { - return func(cfg *Config) error { - cfg.UDPBlackHoleSuccessCounter = f - cfg.CustomUDPBlackHoleSuccessCounter = true - return nil - } -} - -// IPv6BlackHoleSuccessCounter configures libp2p to use f as the black hole filter for IPv6 addrs -func IPv6BlackHoleSuccessCounter(f *swarm.BlackHoleSuccessCounter) Option { - return func(cfg *Config) error { - cfg.IPv6BlackHoleSuccessCounter = f - cfg.CustomIPv6BlackHoleSuccessCounter = true - return nil - } -} - // WithFxOption adds a user provided fx.Option to the libp2p constructor. // Experimental: This option is subject to change or removal. func WithFxOption(opts ...fx.Option) Option { diff --git a/go-libp2p/p2p/net/swarm/black_hole_detector.go b/go-libp2p/p2p/net/swarm/black_hole_detector.go deleted file mode 100644 index bc615bc..0000000 --- a/go-libp2p/p2p/net/swarm/black_hole_detector.go +++ /dev/null @@ -1,274 +0,0 @@ -package swarm - -import ( - "fmt" - "sync" - - ma "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" -) - -type BlackHoleState int - -const ( - blackHoleStateProbing BlackHoleState = iota - blackHoleStateAllowed - blackHoleStateBlocked -) - -func (st BlackHoleState) String() string { - switch st { - case blackHoleStateProbing: - return "Probing" - case blackHoleStateAllowed: - return "Allowed" - case blackHoleStateBlocked: - return "Blocked" - default: - return fmt.Sprintf("Unknown %d", st) - } -} - -// BlackHoleSuccessCounter provides black hole filtering for dials. This filter should be used in concert -// with a UDP or IPv6 address filter to detect UDP or IPv6 black hole. In a black holed environment, -// dial requests are refused Requests are blocked if the number of successes in the last N dials is -// less than MinSuccesses. -// If a request succeeds in Blocked state, the filter state is reset and N subsequent requests are -// allowed before reevaluating black hole state. Dials cancelled when some other concurrent dial -// succeeded are counted as failures. A sufficiently large N prevents false negatives in such cases. -type BlackHoleSuccessCounter struct { - // N is - // 1. 
The minimum number of completed dials required before evaluating black hole state - // 2. the minimum number of requests after which we probe the state of the black hole in - // blocked state - N int - // MinSuccesses is the minimum number of Success required in the last n dials - // to consider we are not blocked. - MinSuccesses int - // Name for the detector. - Name string - - mu sync.Mutex - // requests counts number of dial requests to peers. We handle request at a peer - // level and record results at individual address dial level. - requests int - // dialResults of the last `n` dials. A successful dial is true. - dialResults []bool - // successes is the count of successful dials in outcomes - successes int - // state is the current state of the detector - state BlackHoleState -} - -// RecordResult records the outcome of a dial. A successful dial in Blocked state will change the -// state of the filter to Probing. A failed dial only blocks subsequent requests if the success -// fraction over the last n outcomes is less than the minSuccessFraction of the filter. -func (b *BlackHoleSuccessCounter) RecordResult(success bool) { - b.mu.Lock() - defer b.mu.Unlock() - - if b.state == blackHoleStateBlocked && success { - // If the call succeeds in a blocked state we reset to allowed. - // This is better than slowly accumulating values till we cross the minSuccessFraction - // threshold since a black hole is a binary property. - b.reset() - return - } - - if success { - b.successes++ - } - b.dialResults = append(b.dialResults, success) - - if len(b.dialResults) > b.N { - if b.dialResults[0] { - b.successes-- - } - b.dialResults = b.dialResults[1:] - } - - b.updateState() -} - -// HandleRequest returns the result of applying the black hole filter for the request. -func (b *BlackHoleSuccessCounter) HandleRequest() BlackHoleState { - b.mu.Lock() - defer b.mu.Unlock() - - b.requests++ - - if b.state == blackHoleStateAllowed { - return blackHoleStateAllowed - } else if b.state == blackHoleStateProbing || b.requests%b.N == 0 { - return blackHoleStateProbing - } else { - return blackHoleStateBlocked - } -} - -func (b *BlackHoleSuccessCounter) reset() { - b.successes = 0 - b.dialResults = b.dialResults[:0] - b.requests = 0 - b.updateState() -} - -func (b *BlackHoleSuccessCounter) updateState() { - st := b.state - - if len(b.dialResults) < b.N { - b.state = blackHoleStateProbing - } else if b.successes >= b.MinSuccesses { - b.state = blackHoleStateAllowed - } else { - b.state = blackHoleStateBlocked - } - - if st != b.state { - log.Debug("blackHoleDetector state changed", "name", b.Name, "from", st, "to", b.state) - } -} - -func (b *BlackHoleSuccessCounter) State() BlackHoleState { - b.mu.Lock() - defer b.mu.Unlock() - - return b.state -} - -type blackHoleInfo struct { - name string - state BlackHoleState - nextProbeAfter int - successFraction float64 -} - -func (b *BlackHoleSuccessCounter) info() blackHoleInfo { - b.mu.Lock() - defer b.mu.Unlock() - - nextProbeAfter := 0 - if b.state == blackHoleStateBlocked { - nextProbeAfter = b.N - (b.requests % b.N) - } - - successFraction := 0.0 - if len(b.dialResults) > 0 { - successFraction = float64(b.successes) / float64(len(b.dialResults)) - } - - return blackHoleInfo{ - name: b.Name, - state: b.state, - nextProbeAfter: nextProbeAfter, - successFraction: successFraction, - } -} - -// blackHoleDetector provides UDP and IPv6 black hole detection using a `BlackHoleSuccessCounter` for each. 
-// For details of the black hole detection logic see `BlackHoleSuccessCounter`. -// In Read Only mode, detector doesn't update the state of underlying filters and refuses requests -// when black hole state is unknown. This is useful for Swarms made specifically for services like -// AutoNAT where we care about accurately reporting the reachability of a peer. -// -// Black hole filtering is done at a peer dial level to ensure that periodic probes to detect change -// of the black hole state are actually dialed and are not skipped because of dial prioritisation -// logic. -type blackHoleDetector struct { - udp, ipv6 *BlackHoleSuccessCounter - mt MetricsTracer - readOnly bool -} - -// FilterAddrs filters the peer's addresses removing black holed addresses -func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multiaddr, blackHoled []ma.Multiaddr) { - hasUDP, hasIPv6 := false, false - for _, a := range addrs { - if isPubAddr, err := manet.IsPublicAddr(a); !isPubAddr || err != nil { - continue - } - if isProtocolAddr(a, ma.P_UDP) { - hasUDP = true - } - if isProtocolAddr(a, ma.P_IP6) { - hasIPv6 = true - } - } - - udpRes := blackHoleStateAllowed - if d.udp != nil && hasUDP { - udpRes = d.getFilterState(d.udp) - d.trackMetrics(d.udp) - } - - ipv6Res := blackHoleStateAllowed - if d.ipv6 != nil && hasIPv6 { - ipv6Res = d.getFilterState(d.ipv6) - d.trackMetrics(d.ipv6) - } - - blackHoled = make([]ma.Multiaddr, 0, len(addrs)) - return ma.FilterAddrs( - addrs, - func(a ma.Multiaddr) bool { - if isPubAddr, err := manet.IsPublicAddr(a); !isPubAddr || err != nil { - return true - } - // allow all UDP addresses while probing irrespective of IPv6 black hole state - if udpRes == blackHoleStateProbing && isProtocolAddr(a, ma.P_UDP) { - return true - } - // allow all IPv6 addresses while probing irrespective of UDP black hole state - if ipv6Res == blackHoleStateProbing && isProtocolAddr(a, ma.P_IP6) { - return true - } - - if udpRes == blackHoleStateBlocked && isProtocolAddr(a, ma.P_UDP) { - blackHoled = append(blackHoled, a) - return false - } - if ipv6Res == blackHoleStateBlocked && isProtocolAddr(a, ma.P_IP6) { - blackHoled = append(blackHoled, a) - return false - } - return true - }, - ), blackHoled -} - -// RecordResult updates the state of the relevant BlackHoleSuccessCounters for addr -func (d *blackHoleDetector) RecordResult(addr ma.Multiaddr, success bool) { - if d.readOnly { - return - } - if isPubAddr, err := manet.IsPublicAddr(addr); !isPubAddr || err != nil { - return - } - if d.udp != nil && isProtocolAddr(addr, ma.P_UDP) { - d.udp.RecordResult(success) - d.trackMetrics(d.udp) - } - if d.ipv6 != nil && isProtocolAddr(addr, ma.P_IP6) { - d.ipv6.RecordResult(success) - d.trackMetrics(d.ipv6) - } -} - -func (d *blackHoleDetector) getFilterState(f *BlackHoleSuccessCounter) BlackHoleState { - if d.readOnly { - if f.State() != blackHoleStateAllowed { - return blackHoleStateBlocked - } - return blackHoleStateAllowed - } - return f.HandleRequest() -} - -func (d *blackHoleDetector) trackMetrics(f *BlackHoleSuccessCounter) { - if d.readOnly || d.mt == nil { - return - } - // Track metrics only in non readOnly state - info := f.info() - d.mt.UpdatedBlackHoleSuccessCounter(info.name, info.state, info.nextProbeAfter, info.successFraction) -} diff --git a/go-libp2p/p2p/net/swarm/black_hole_detector_test.go b/go-libp2p/p2p/net/swarm/black_hole_detector_test.go deleted file mode 100644 index 21c92bf..0000000 --- a/go-libp2p/p2p/net/swarm/black_hole_detector_test.go +++ /dev/null @@ -1,250 
+0,0 @@ -package swarm - -import ( - "fmt" - "testing" - - ma "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" -) - -func tStringCast(s string) ma.Multiaddr { - st, _ := ma.StringCast(s) - return st -} - -func TestBlackHoleSuccessCounterReset(t *testing.T) { - n := 10 - bhf := &BlackHoleSuccessCounter{N: n, MinSuccesses: 2, Name: "test"} - var i = 0 - // calls up to n should be probing - for i = 1; i <= n; i++ { - if bhf.HandleRequest() != blackHoleStateProbing { - t.Fatalf("expected calls up to n to be probes") - } - if bhf.State() != blackHoleStateProbing { - t.Fatalf("expected state to be probing got %s", bhf.State()) - } - bhf.RecordResult(false) - } - - // after threshold calls every nth call should be a probe - for i = n + 1; i < 42; i++ { - result := bhf.HandleRequest() - if (i%n == 0 && result != blackHoleStateProbing) || (i%n != 0 && result != blackHoleStateBlocked) { - t.Fatalf("expected every nth dial to be a probe") - } - if bhf.State() != blackHoleStateBlocked { - t.Fatalf("expected state to be blocked, got %s", bhf.State()) - } - } - - bhf.RecordResult(true) - // check if calls up to n are probes again - for i = 0; i < n; i++ { - if bhf.HandleRequest() != blackHoleStateProbing { - t.Fatalf("expected black hole detector state to reset after success") - } - if bhf.State() != blackHoleStateProbing { - t.Fatalf("expected state to be probing got %s", bhf.State()) - } - bhf.RecordResult(false) - } - - // next call should be blocked - if bhf.HandleRequest() != blackHoleStateBlocked { - t.Fatalf("expected dial to be blocked") - if bhf.State() != blackHoleStateBlocked { - t.Fatalf("expected state to be blocked, got %s", bhf.State()) - } - } -} - -func TestBlackHoleSuccessCounterSuccessFraction(t *testing.T) { - n := 10 - tests := []struct { - minSuccesses, successes int - result BlackHoleState - }{ - {minSuccesses: 5, successes: 5, result: blackHoleStateAllowed}, - {minSuccesses: 3, successes: 3, result: blackHoleStateAllowed}, - {minSuccesses: 5, successes: 4, result: blackHoleStateBlocked}, - {minSuccesses: 5, successes: 7, result: blackHoleStateAllowed}, - {minSuccesses: 3, successes: 1, result: blackHoleStateBlocked}, - {minSuccesses: 0, successes: 0, result: blackHoleStateAllowed}, - {minSuccesses: 10, successes: 10, result: blackHoleStateAllowed}, - } - for i, tc := range tests { - t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { - bhf := BlackHoleSuccessCounter{N: n, MinSuccesses: tc.minSuccesses} - for i := 0; i < tc.successes; i++ { - bhf.RecordResult(true) - } - for i := 0; i < n-tc.successes; i++ { - bhf.RecordResult(false) - } - got := bhf.HandleRequest() - if got != tc.result { - t.Fatalf("expected %d got %d", tc.result, got) - } - }) - } -} - -func TestBlackHoleDetectorInApplicableAddress(t *testing.T) { - udpF := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5} - ipv6F := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5} - bhd := &blackHoleDetector{udp: udpF, ipv6: ipv6F} - addrs := []ma.Multiaddr{ - tStringCast("/ip4/1.2.3.4/tcp/1234"), - tStringCast("/ip4/1.2.3.4/tcp/1233"), - tStringCast("/ip6/::1/udp/1234/quic-v1"), - tStringCast("/ip4/192.168.1.5/udp/1234/quic-v1"), - } - for i := 0; i < 1000; i++ { - filteredAddrs, _ := bhd.FilterAddrs(addrs) - require.ElementsMatch(t, addrs, filteredAddrs) - for j := 0; j < len(addrs); j++ { - bhd.RecordResult(addrs[j], false) - } - } -} - -func TestBlackHoleDetectorUDPDisabled(t *testing.T) { - ipv6F := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5} - bhd := &blackHoleDetector{ipv6: ipv6F} - 
publicAddr := tStringCast("/ip4/1.2.3.4/udp/1234/quic-v1") - privAddr := tStringCast("/ip4/192.168.1.5/udp/1234/quic-v1") - for i := 0; i < 100; i++ { - bhd.RecordResult(publicAddr, false) - } - wantAddrs := []ma.Multiaddr{publicAddr, privAddr} - wantRemovedAddrs := make([]ma.Multiaddr, 0) - - gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(wantAddrs) - require.ElementsMatch(t, wantAddrs, gotAddrs) - require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs) -} - -func TestBlackHoleDetectorIPv6Disabled(t *testing.T) { - udpF := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5} - bhd := &blackHoleDetector{udp: udpF} - publicAddr := tStringCast("/ip6/2001::1/tcp/1234") - privAddr := tStringCast("/ip6/::1/tcp/1234") - for i := 0; i < 100; i++ { - bhd.RecordResult(publicAddr, false) - } - - wantAddrs := []ma.Multiaddr{publicAddr, privAddr} - wantRemovedAddrs := make([]ma.Multiaddr, 0) - - gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(wantAddrs) - require.ElementsMatch(t, wantAddrs, gotAddrs) - require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs) -} - -func TestBlackHoleDetectorProbes(t *testing.T) { - bhd := &blackHoleDetector{ - udp: &BlackHoleSuccessCounter{N: 2, MinSuccesses: 1, Name: "udp"}, - ipv6: &BlackHoleSuccessCounter{N: 3, MinSuccesses: 1, Name: "ipv6"}, - } - udp6Addr := tStringCast("/ip6/2001::1/udp/1234/quic-v1") - addrs := []ma.Multiaddr{udp6Addr} - for i := 0; i < 3; i++ { - bhd.RecordResult(udp6Addr, false) - } - for i := 1; i < 100; i++ { - filteredAddrs, _ := bhd.FilterAddrs(addrs) - if i%2 == 0 || i%3 == 0 { - if len(filteredAddrs) == 0 { - t.Fatalf("expected probe to be allowed irrespective of the state of other black hole filter") - } - } else { - if len(filteredAddrs) != 0 { - t.Fatalf("expected dial to be blocked %s", filteredAddrs) - } - } - } - -} - -func TestBlackHoleDetectorAddrFiltering(t *testing.T) { - udp6Pub := tStringCast("/ip6/2001::1/udp/1234/quic-v1") - udp6Pri := tStringCast("/ip6/::1/udp/1234/quic-v1") - udp4Pub := tStringCast("/ip4/1.2.3.4/udp/1234/quic-v1") - udp4Pri := tStringCast("/ip4/192.168.1.5/udp/1234/quic-v1") - tcp6Pub := tStringCast("/ip6/2001::1/tcp/1234/quic-v1") - tcp6Pri := tStringCast("/ip6/::1/tcp/1234/quic-v1") - tcp4Pub := tStringCast("/ip4/1.2.3.4/tcp/1234/quic-v1") - tcp4Pri := tStringCast("/ip4/192.168.1.5/tcp/1234/quic-v1") - - makeBHD := func(udpBlocked, ipv6Blocked bool) *blackHoleDetector { - bhd := &blackHoleDetector{ - udp: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "udp"}, - ipv6: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "ipv6"}, - } - for i := 0; i < 100; i++ { - bhd.RecordResult(udp4Pub, !udpBlocked) - } - for i := 0; i < 100; i++ { - bhd.RecordResult(tcp6Pub, !ipv6Blocked) - } - return bhd - } - - allInput := []ma.Multiaddr{udp6Pub, udp6Pri, udp4Pub, udp4Pri, tcp6Pub, tcp6Pri, - tcp4Pub, tcp4Pri} - - udpBlockedOutput := []ma.Multiaddr{udp6Pri, udp4Pri, tcp6Pub, tcp6Pri, tcp4Pub, tcp4Pri} - udpPublicAddrs := []ma.Multiaddr{udp6Pub, udp4Pub} - bhd := makeBHD(true, false) - gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(allInput) - require.ElementsMatch(t, udpBlockedOutput, gotAddrs) - require.ElementsMatch(t, udpPublicAddrs, gotRemovedAddrs) - - ip6BlockedOutput := []ma.Multiaddr{udp6Pri, udp4Pub, udp4Pri, tcp6Pri, tcp4Pub, tcp4Pri} - ip6PublicAddrs := []ma.Multiaddr{udp6Pub, tcp6Pub} - bhd = makeBHD(false, true) - gotAddrs, gotRemovedAddrs = bhd.FilterAddrs(allInput) - require.ElementsMatch(t, ip6BlockedOutput, gotAddrs) - require.ElementsMatch(t, ip6PublicAddrs, gotRemovedAddrs) - - 
bothBlockedOutput := []ma.Multiaddr{udp6Pri, udp4Pri, tcp6Pri, tcp4Pub, tcp4Pri} - bothPublicAddrs := []ma.Multiaddr{udp6Pub, tcp6Pub, udp4Pub} - bhd = makeBHD(true, true) - gotAddrs, gotRemovedAddrs = bhd.FilterAddrs(allInput) - require.ElementsMatch(t, bothBlockedOutput, gotAddrs) - require.ElementsMatch(t, bothPublicAddrs, gotRemovedAddrs) -} - -func TestBlackHoleDetectorReadOnlyMode(t *testing.T) { - udpF := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5} - ipv6F := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5} - bhd := &blackHoleDetector{udp: udpF, ipv6: ipv6F, readOnly: true} - publicAddr := tStringCast("/ip4/1.2.3.4/udp/1234/quic-v1") - privAddr := tStringCast("/ip6/::1/tcp/1234") - for i := 0; i < 100; i++ { - bhd.RecordResult(publicAddr, true) - } - allAddr := []ma.Multiaddr{privAddr, publicAddr} - // public addr filtered because state is probing - wantAddrs := []ma.Multiaddr{privAddr} - wantRemovedAddrs := []ma.Multiaddr{publicAddr} - - gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(allAddr) - require.ElementsMatch(t, wantAddrs, gotAddrs) - require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs) - - // a non readonly shared state black hole detector - nbhd := &blackHoleDetector{udp: bhd.udp, ipv6: bhd.ipv6, readOnly: false} - for i := 0; i < 100; i++ { - nbhd.RecordResult(publicAddr, true) - } - // no addresses filtered because state is allowed - wantAddrs = []ma.Multiaddr{privAddr, publicAddr} - wantRemovedAddrs = []ma.Multiaddr{} - - gotAddrs, gotRemovedAddrs = bhd.FilterAddrs(allAddr) - require.ElementsMatch(t, wantAddrs, gotAddrs) - require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs) -} diff --git a/go-libp2p/p2p/net/swarm/dial_worker.go b/go-libp2p/p2p/net/swarm/dial_worker.go index 641ec3b..faa8232 100644 --- a/go-libp2p/p2p/net/swarm/dial_worker.go +++ b/go-libp2p/p2p/net/swarm/dial_worker.go @@ -366,15 +366,11 @@ loop: continue loop } - // it must be an error -- add backoff if applicable and dispatch - // ErrDialRefusedBlackHole shouldn't end up here, just a safety check - if res.Err != ErrDialRefusedBlackHole && res.Err != context.Canceled && !w.connected { + // it must be an error -- add backoff if applicable + if res.Err != context.Canceled && !w.connected { // we only add backoff if there has not been a successful connection // for consistency with the old dialer behavior. 
w.s.backf.AddBackoff(w.peer, res.Addr) - } else if res.Err == ErrDialRefusedBlackHole { - log.Error("SWARM BUG: unexpected ErrDialRefusedBlackHole while dialing peer to addr", - "peer", w.peer, "addr", res.Addr) } w.dispatchError(ad, res.Err) diff --git a/go-libp2p/p2p/net/swarm/swarm.go b/go-libp2p/p2p/net/swarm/swarm.go index 365209e..5ea7bbe 100644 --- a/go-libp2p/p2p/net/swarm/swarm.go +++ b/go-libp2p/p2p/net/swarm/swarm.go @@ -115,37 +115,6 @@ func WithDialRanker(d network.DialRanker) Option { } } -// WithUDPBlackHoleSuccessCounter configures swarm to use the provided config for UDP black hole detection -// n is the size of the sliding window used to evaluate black hole state -// min is the minimum number of successes out of n required to not block requests -func WithUDPBlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option { - return func(s *Swarm) error { - s.udpBHF = f - return nil - } -} - -// WithIPv6BlackHoleSuccessCounter configures swarm to use the provided config for IPv6 black hole detection -// n is the size of the sliding window used to evaluate black hole state -// min is the minimum number of successes out of n required to not block requests -func WithIPv6BlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option { - return func(s *Swarm) error { - s.ipv6BHF = f - return nil - } -} - -// WithReadOnlyBlackHoleDetector configures the swarm to use the black hole detector in -// read only mode. In Read Only mode dial requests are refused in unknown state and -// no updates to the detector state are made. This is useful for services like AutoNAT that -// care about accurately providing reachability info. -func WithReadOnlyBlackHoleDetector() Option { - return func(s *Swarm) error { - s.readOnlyBHD = true - return nil - } -} - // Swarm is a connection muxer, allowing connections to other peers to // be opened and closed, while still using the same Chan for all // communication. The Chan sends/receives Messages, which note the @@ -218,10 +187,6 @@ type Swarm struct { dialRanker network.DialRanker connectednessEventEmitter *connectednessEventEmitter - udpBHF *BlackHoleSuccessCounter - ipv6BHF *BlackHoleSuccessCounter - bhd *blackHoleDetector - readOnlyBHD bool } // NewSwarm constructs a Swarm. @@ -241,12 +206,6 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts dialTimeoutLocal: defaultDialTimeoutLocal, multiaddrResolver: ResolverFromMaDNS{madns.DefaultResolver}, dialRanker: DefaultDialRanker, - - // A black hole is a binary property. On a network if UDP dials are blocked or there is - // no IPv6 connectivity, all dials will fail. So a low success rate of 5 out 100 dials - // is good enough. 
- udpBHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"}, - ipv6BHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"}, } s.conns.m = make(map[peer.ID][]*Conn) @@ -270,12 +229,6 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts s.limiter = newDialLimiter(s.dialAddr) s.backf.init(s.ctx) - s.bhd = &blackHoleDetector{ - udp: s.udpBHF, - ipv6: s.ipv6BHF, - mt: s.metricsTracer, - readOnly: s.readOnlyBHD, - } return s, nil } diff --git a/go-libp2p/p2p/net/swarm/swarm_dial.go b/go-libp2p/p2p/net/swarm/swarm_dial.go index ecf8cfd..38a9ff5 100644 --- a/go-libp2p/p2p/net/swarm/swarm_dial.go +++ b/go-libp2p/p2p/net/swarm/swarm_dial.go @@ -42,9 +42,6 @@ var ( // been dialed too frequently ErrDialBackoff = errors.New("dial backoff") - // ErrDialRefusedBlackHole is returned when we are in a black holed environment - ErrDialRefusedBlackHole = errors.New("dial refused because of black hole") - // ErrDialToSelf is returned if we attempt to dial our own peer ErrDialToSelf = errors.New("dial to self attempted") @@ -534,12 +531,6 @@ func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) (goodAdd // We don't return an error for these addresses addrs = filterLowPriorityAddresses(addrs) - // remove black holed addrs - addrs, blackHoledAddrs := s.bhd.FilterAddrs(addrs) - for _, a := range blackHoledAddrs { - addrErrs = append(addrErrs, TransportError{Address: a, Cause: ErrDialRefusedBlackHole}) - } - return ma.FilterAddrs(addrs, // Linux and BSD treat an unspecified address when dialing as a localhost address. // Windows doesn't support this. We filter all such addresses out because peers @@ -613,11 +604,6 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, updC connC, err = tpt.Dial(ctx, addr, p) } - // We're recording any error as a failure here. - // Notably, this also applies to cancellations (i.e. if another dial attempt was faster). - // This is ok since the black hole detector uses a very low threshold (5%). - s.bhd.RecordResult(addr, err == nil) - if err != nil { if s.metricsTracer != nil { s.metricsTracer.FailedDialing(addr, err, context.Cause(ctx)) diff --git a/go-libp2p/p2p/net/swarm/swarm_dial_test.go b/go-libp2p/p2p/net/swarm/swarm_dial_test.go index 1302f45..1f4a768 100644 --- a/go-libp2p/p2p/net/swarm/swarm_dial_test.go +++ b/go-libp2p/p2p/net/swarm/swarm_dial_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "crypto/rand" - "errors" "fmt" "net" "sort" @@ -357,50 +356,6 @@ func TestAddrsForDialFiltering(t *testing.T) { } } -func TestBlackHoledAddrBlocked(t *testing.T) { - resolver, err := madns.NewResolver() - if err != nil { - t.Fatal(err) - } - s := newTestSwarmWithResolver(t, resolver) - defer s.Close() - - n := 3 - s.bhd.ipv6 = &BlackHoleSuccessCounter{N: n, MinSuccesses: 1, Name: "IPv6"} - - // All dials to this addr will fail. - // manet.IsPublic is aggressive for IPv6 addresses. Use a NAT64 address. 
- addr := tStringCast("/ip6/64:ff9b::1.2.3.4/tcp/54321/") - - p, err := test.RandPeerID() - if err != nil { - t.Error(err) - } - s.Peerstore().AddAddr(p, addr, peerstore.PermanentAddrTTL) - - // do 1 extra dial to ensure that the blackHoleDetector state is updated since it - // happens in a different goroutine - for i := 0; i < n+1; i++ { - s.backf.Clear(p) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - conn, err := s.DialPeer(ctx, p) - if err == nil || conn != nil { - t.Fatalf("expected dial to fail") - } - cancel() - } - s.backf.Clear(p) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - conn, err := s.DialPeer(ctx, p) - require.Nil(t, conn) - var de *DialError - if !errors.As(err, &de) { - t.Fatalf("expected to receive an error of type *DialError, got %s of type %T", err, err) - } - require.ErrorIs(t, err, ErrDialRefusedBlackHole) -} - type mockDNSResolver struct { ipsToReturn []net.IPAddr txtsToReturn []string diff --git a/go-libp2p/p2p/net/swarm/swarm_metrics.go b/go-libp2p/p2p/net/swarm/swarm_metrics.go index 2413e9f..5331f74 100644 --- a/go-libp2p/p2p/net/swarm/swarm_metrics.go +++ b/go-libp2p/p2p/net/swarm/swarm_metrics.go @@ -94,30 +94,6 @@ var ( Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2}, }, ) - blackHoleSuccessCounterState = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricNamespace, - Name: "black_hole_filter_state", - Help: "State of the black hole filter", - }, - []string{"name"}, - ) - blackHoleSuccessCounterSuccessFraction = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricNamespace, - Name: "black_hole_filter_success_fraction", - Help: "Fraction of successful dials among the last n requests", - }, - []string{"name"}, - ) - blackHoleSuccessCounterNextRequestAllowedAfter = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricNamespace, - Name: "black_hole_filter_next_request_allowed_after", - Help: "Number of requests after which the next request will be allowed", - }, - []string{"name"}, - ) collectors = []prometheus.Collector{ connsOpened, keyTypes, @@ -128,9 +104,6 @@ var ( dialsPerPeer, dialRankingDelay, dialLatency, - blackHoleSuccessCounterSuccessFraction, - blackHoleSuccessCounterState, - blackHoleSuccessCounterNextRequestAllowedAfter, } ) @@ -141,7 +114,6 @@ type MetricsTracer interface { FailedDialing(ma.Multiaddr, error, error) DialCompleted(success bool, totalDials int, latency time.Duration) DialRankingDelay(d time.Duration) - UpdatedBlackHoleSuccessCounter(name string, state BlackHoleState, nextProbeAfter int, successFraction float64) } type metricsTracer struct{} @@ -284,15 +256,3 @@ func (m *metricsTracer) DialCompleted(success bool, totalDials int, latency time func (m *metricsTracer) DialRankingDelay(d time.Duration) { dialRankingDelay.Observe(d.Seconds()) } - -func (m *metricsTracer) UpdatedBlackHoleSuccessCounter(name string, state BlackHoleState, - nextProbeAfter int, successFraction float64) { - tags := metricshelper.GetStringSlice() - defer metricshelper.PutStringSlice(tags) - - *tags = append(*tags, name) - - blackHoleSuccessCounterState.WithLabelValues(*tags...).Set(float64(state)) - blackHoleSuccessCounterSuccessFraction.WithLabelValues(*tags...).Set(successFraction) - blackHoleSuccessCounterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter)) -} diff --git a/go-libp2p/p2p/net/swarm/swarm_metrics_test.go b/go-libp2p/p2p/net/swarm/swarm_metrics_test.go index 
1517659..0928385 100644 --- a/go-libp2p/p2p/net/swarm/swarm_metrics_test.go +++ b/go-libp2p/p2p/net/swarm/swarm_metrics_test.go @@ -78,9 +78,6 @@ func TestMetricsNoAllocNoCover(t *testing.T) { ma.StringCast("/ip4/1.2.3.4/udp/2345"), } - bhfNames := []string{"udp", "ipv6", "tcp", "icmp"} - bhfState := []BlackHoleState{blackHoleStateAllowed, blackHoleStateBlocked} - tests := map[string]func(){ "OpenedConnection": func() { mt.OpenedConnection(randItem(directions), randItem(keys), randItem(connections), randItem(addrs)) @@ -94,14 +91,6 @@ func TestMetricsNoAllocNoCover(t *testing.T) { "FailedDialing": func() { mt.FailedDialing(randItem(addrs), randItem(errors), randItem(errors)) }, "DialCompleted": func() { mt.DialCompleted(mrand.Intn(2) == 1, mrand.Intn(10), time.Duration(mrand.Intn(1000_000_000))) }, "DialRankingDelay": func() { mt.DialRankingDelay(time.Duration(mrand.Intn(1e10))) }, - "UpdatedBlackHoleSuccessCounter": func() { - mt.UpdatedBlackHoleSuccessCounter( - randItem(bhfNames), - randItem(bhfState), - mrand.Intn(100), - mrand.Float64(), - ) - }, } for method, f := range tests { diff --git a/go-libp2p/p2p/protocol/autonatv2/autonat_test.go b/go-libp2p/p2p/protocol/autonatv2/autonat_test.go index 551ca24..7a3a1f2 100644 --- a/go-libp2p/p2p/protocol/autonatv2/autonat_test.go +++ b/go-libp2p/p2p/protocol/autonatv2/autonat_test.go @@ -19,7 +19,6 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" bhost "github.com/libp2p/go-libp2p/p2p/host/blank" "github.com/libp2p/go-libp2p/p2p/host/eventbus" - "github.com/libp2p/go-libp2p/p2p/net/swarm" swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb" @@ -42,9 +41,7 @@ func newAutoNAT(t testing.TB, dialer host.Host, opts ...AutoNATOption) *AutoNAT if dialer == nil { dialer = bhost.NewBlankHost( swarmt.GenSwarm(t, - swarmt.WithSwarmOpts( - swarm.WithUDPBlackHoleSuccessCounter(nil), - swarm.WithIPv6BlackHoleSuccessCounter(nil)))) + swarmt.WithSwarmOpts())) } opts = append([]AutoNATOption{withThrottlePeerDuration(0)}, opts...) an, err := New(dialer, opts...) 
diff --git a/go-libp2p/p2p/protocol/autonatv2/server_test.go b/go-libp2p/p2p/protocol/autonatv2/server_test.go index 03a470f..9ad9354 100644 --- a/go-libp2p/p2p/protocol/autonatv2/server_test.go +++ b/go-libp2p/p2p/protocol/autonatv2/server_test.go @@ -16,7 +16,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/test" bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - "github.com/libp2p/go-libp2p/p2p/net/swarm" swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb" "github.com/libp2p/go-msgio/pbio" @@ -53,7 +52,7 @@ func TestServerInvalidAddrsRejected(t *testing.T) { t.Run("black holed addr", func(t *testing.T) { dialer := bhost.NewBlankHost(swarmt.GenSwarm( - t, swarmt.WithSwarmOpts(swarm.WithReadOnlyBlackHoleDetector()))) + t, swarmt.WithSwarmOpts())) an := newAutoNAT(t, dialer) defer an.Close() defer an.host.Close() diff --git a/hypergraph/hypergraph.go b/hypergraph/hypergraph.go index 1facfb3..cf86a11 100644 --- a/hypergraph/hypergraph.go +++ b/hypergraph/hypergraph.go @@ -122,6 +122,12 @@ func (hg *HypergraphCRDT) cloneSetWithStore( func (hg *HypergraphCRDT) SetShutdownContext(ctx context.Context) { hg.shutdownCtx = ctx + go func() { + select { + case <-hg.shutdownCtx.Done(): + hg.snapshotMgr.release(hg.snapshotMgr.current) + } + }() } func (hg *HypergraphCRDT) contextWithShutdown( diff --git a/hypergraph/id_set.go b/hypergraph/id_set.go index bf0ccb4..c6cdec0 100644 --- a/hypergraph/id_set.go +++ b/hypergraph/id_set.go @@ -98,6 +98,22 @@ func (set *idSet) Add( ) } +// AddRaw inserts raw leaf data directly into the backing store without tree +// traversal. This is used for raw sync operations where data is pre-serialized. +func (set *idSet) AddRaw( + txn tries.TreeBackingStoreTransaction, + leaf *tries.RawLeafData, +) error { + set.dirty = true + return set.tree.Store.InsertRawLeaf( + txn, + set.tree.SetType, + set.tree.PhaseType, + set.tree.ShardKey, + leaf, + ) +} + // Delete removes an atom from the ID set. The atom must match the set's atom // type or ErrInvalidAtomType is returned. The atom is removed from the backing // tree store. diff --git a/hypergraph/snapshot_manager.go b/hypergraph/snapshot_manager.go index e3882e6..333ab7b 100644 --- a/hypergraph/snapshot_manager.go +++ b/hypergraph/snapshot_manager.go @@ -3,9 +3,11 @@ package hypergraph import ( "encoding/hex" "fmt" + "sync" "sync/atomic" "go.uber.org/zap" + "source.quilibrium.com/quilibrium/monorepo/protobufs" "source.quilibrium.com/quilibrium/monorepo/types/tries" ) @@ -14,6 +16,13 @@ type snapshotHandle struct { release func() refs atomic.Int32 root []byte + + branchCacheMu sync.RWMutex + branchCache map[string]*protobufs.HypergraphComparisonResponse + + leafCacheMu sync.RWMutex + leafDataCache map[string][]byte + leafCacheMiss map[string]struct{} } func newSnapshotHandle( @@ -22,8 +31,11 @@ func newSnapshotHandle( root []byte, ) *snapshotHandle { h := &snapshotHandle{ - store: store, - release: release, + store: store, + release: release, + branchCache: make(map[string]*protobufs.HypergraphComparisonResponse), + leafDataCache: make(map[string][]byte), + leafCacheMiss: make(map[string]struct{}), } if len(root) != 0 { h.root = append([]byte{}, root...) @@ -63,9 +75,79 @@ func (h *snapshotHandle) Root() []byte { return append([]byte{}, h.root...) 
} +func (h *snapshotHandle) getBranchInfo( + path []int32, +) (*protobufs.HypergraphComparisonResponse, bool) { + if h == nil { + return nil, false + } + key := string(packPath(path)) + h.branchCacheMu.RLock() + resp, ok := h.branchCache[key] + h.branchCacheMu.RUnlock() + return resp, ok +} + +func (h *snapshotHandle) storeBranchInfo( + path []int32, + resp *protobufs.HypergraphComparisonResponse, +) { + if h == nil || resp == nil { + return + } + key := string(packPath(path)) + h.branchCacheMu.Lock() + h.branchCache[key] = resp + h.branchCacheMu.Unlock() +} + +func (h *snapshotHandle) getLeafData(key []byte) ([]byte, bool) { + if h == nil { + return nil, false + } + cacheKey := string(key) + h.leafCacheMu.RLock() + data, ok := h.leafDataCache[cacheKey] + h.leafCacheMu.RUnlock() + return data, ok +} + +func (h *snapshotHandle) storeLeafData(key []byte, data []byte) { + if h == nil || len(data) == 0 { + return + } + cacheKey := string(key) + h.leafCacheMu.Lock() + h.leafDataCache[cacheKey] = data + delete(h.leafCacheMiss, cacheKey) + h.leafCacheMu.Unlock() +} + +func (h *snapshotHandle) markLeafMiss(key []byte) { + if h == nil { + return + } + cacheKey := string(key) + h.leafCacheMu.Lock() + h.leafCacheMiss[cacheKey] = struct{}{} + h.leafCacheMu.Unlock() +} + +func (h *snapshotHandle) isLeafMiss(key []byte) bool { + if h == nil { + return false + } + cacheKey := string(key) + h.leafCacheMu.RLock() + _, miss := h.leafCacheMiss[cacheKey] + h.leafCacheMu.RUnlock() + return miss +} + type snapshotManager struct { logger *zap.Logger - current atomic.Pointer[snapshotHandle] + mu sync.Mutex + current *snapshotHandle } func newSnapshotManager(logger *zap.Logger) *snapshotManager { @@ -77,11 +159,17 @@ func (m *snapshotManager) publish( release func(), root []byte, ) { + m.mu.Lock() + defer m.mu.Unlock() + handle := newSnapshotHandle(store, release, root) - prev := m.current.Swap(handle) + prev := m.current + m.current = handle + if prev != nil { prev.releaseRef(m.logger) } + rootHex := "" if len(root) != 0 { rootHex = hex.EncodeToString(root) @@ -90,12 +178,14 @@ func (m *snapshotManager) publish( } func (m *snapshotManager) acquire() *snapshotHandle { - handle := m.current.Load() - if handle == nil { + m.mu.Lock() + defer m.mu.Unlock() + + if m.current == nil { return nil } - handle.acquire() - return handle + m.current.acquire() + return m.current } func (m *snapshotManager) release(handle *snapshotHandle) { diff --git a/hypergraph/sync.go b/hypergraph/sync.go index 52ae949..b58b72e 100644 --- a/hypergraph/sync.go +++ b/hypergraph/sync.go @@ -23,18 +23,33 @@ import ( // HyperStream is the gRPC method that handles synchronization. 
func (hg *HypergraphCRDT) HyperStream( stream protobufs.HypergraphComparisonService_HyperStreamServer, -) error { +) (err error) { requestCtx := stream.Context() ctx, shutdownCancel := hg.contextWithShutdown(requestCtx) defer shutdownCancel() + sessionLogger := hg.logger + sessionStart := time.Now() + defer func() { + sessionLogger.Info( + "hyperstream session finished", + zap.Duration("session_duration", time.Since(sessionStart)), + zap.Error(err), + ) + }() + + identifyStart := time.Now() peerId, err := hg.authenticationProvider.Identify(requestCtx) if err != nil { return errors.Wrap(err, "hyper stream") } + sessionLogger = sessionLogger.With(zap.String("peer_id", peerId.String())) + sessionLogger.Debug( + "identified peer", + zap.Duration("duration", time.Since(identifyStart)), + ) peerKey := peerId.String() - sessionLogger := hg.logger.With(zap.String("peer_id", peerId.String())) if addr := peerIPFromContext(requestCtx); addr != "" { sessionLogger = sessionLogger.With(zap.String("peer_ip", addr)) } @@ -45,11 +60,16 @@ func (hg *HypergraphCRDT) HyperStream( hg.syncController.EndSyncSession(peerKey) }() + snapshotStart := time.Now() handle := hg.snapshotMgr.acquire() if handle == nil { return errors.New("hypergraph snapshot unavailable") } defer hg.snapshotMgr.release(handle) + sessionLogger.Debug( + "snapshot acquisition complete", + zap.Duration("duration", time.Since(snapshotStart)), + ) root := handle.Root() if len(root) != 0 { @@ -63,7 +83,13 @@ func (hg *HypergraphCRDT) HyperStream( snapshotStore := handle.Store() - err = hg.syncTreeServer(ctx, stream, snapshotStore, root, sessionLogger) + syncStart := time.Now() + err = hg.syncTreeServer(ctx, stream, snapshotStore, root, sessionLogger, handle) + sessionLogger.Info( + "syncTreeServer completed", + zap.Duration("sync_duration", time.Since(syncStart)), + zap.Error(err), + ) hg.syncController.SetStatus(peerKey, &hypergraph.SyncInfo{ Unreachable: false, @@ -81,7 +107,7 @@ func (hg *HypergraphCRDT) Sync( stream protobufs.HypergraphComparisonService_HyperStreamClient, shardKey tries.ShardKey, phaseSet protobufs.HypergraphPhaseSet, -) error { +) (err error) { const localSyncKey = "local-sync" if !hg.syncController.TryEstablishSyncSession(localSyncKey) { return errors.New("local sync already in progress") @@ -93,14 +119,21 @@ func (hg *HypergraphCRDT) Sync( hg.mu.Lock() defer hg.mu.Unlock() + syncStart := time.Now() + shardKeyHex := hex.EncodeToString(slices.Concat(shardKey.L1[:], shardKey.L2[:])) hg.logger.Info( - "sending initialization message", - zap.String( - "shard_key", - hex.EncodeToString(slices.Concat(shardKey.L1[:], shardKey.L2[:])), - ), + "sync started", + zap.String("shard_key", shardKeyHex), zap.Int("phase_set", int(phaseSet)), ) + defer func() { + hg.logger.Info( + "sync finished", + zap.String("shard_key", shardKeyHex), + zap.Duration("duration", time.Since(syncStart)), + zap.Error(err), + ) + }() var set hypergraph.IdSet switch phaseSet { @@ -119,6 +152,7 @@ func (hg *HypergraphCRDT) Sync( path := hg.getCoveredPrefix() // Send initial query for path + sendStart := time.Now() if err := stream.Send(&protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_Query{ Query: &protobufs.HypergraphComparisonQuery{ @@ -132,13 +166,24 @@ func (hg *HypergraphCRDT) Sync( }); err != nil { return err } + hg.logger.Debug( + "sent initialization message", + zap.String("shard_key", shardKeyHex), + zap.Duration("duration", time.Since(sendStart)), + ) // hg.logger.Debug("server waiting for initial query") + recvStart := 
time.Now() msg, err := stream.Recv() if err != nil { hg.logger.Info("initial recv failed", zap.Error(err)) return err } + hg.logger.Debug( + "received initialization response", + zap.String("shard_key", shardKeyHex), + zap.Duration("duration", time.Since(recvStart)), + ) response := msg.GetResponse() if response == nil { return errors.New( @@ -146,6 +191,7 @@ func (hg *HypergraphCRDT) Sync( ) } + branchInfoStart := time.Now() branchInfo, err := getBranchInfoFromTree( hg.logger, set.GetTree(), @@ -154,6 +200,11 @@ func (hg *HypergraphCRDT) Sync( if err != nil { return err } + hg.logger.Debug( + "constructed branch info", + zap.String("shard_key", shardKeyHex), + zap.Duration("duration", time.Since(branchInfoStart)), + ) resp := &protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_Response{ @@ -161,9 +212,15 @@ func (hg *HypergraphCRDT) Sync( }, } + responseSendStart := time.Now() if err := stream.Send(resp); err != nil { return err } + hg.logger.Debug( + "sent initial branch info", + zap.String("shard_key", shardKeyHex), + zap.Duration("duration", time.Since(responseSendStart)), + ) ctx, cancel := context.WithCancel(stream.Context()) @@ -242,6 +299,8 @@ func (hg *HypergraphCRDT) Sync( incomingResponsesOut, true, false, + shardKey, + phaseSet, ) if err != nil { hg.logger.Debug("error while syncing", zap.Error(err)) @@ -461,6 +520,398 @@ type streamManager struct { localTree *tries.LazyVectorCommitmentTree localSet hypergraph.IdSet lastSent time.Time + snapshot *snapshotHandle +} + +type rawVertexSaver interface { + SaveVertexTreeRaw( + txn tries.TreeBackingStoreTransaction, + id []byte, + data []byte, + ) error +} + +const ( + leafAckMinTimeout = 30 * time.Second + leafAckMaxTimeout = 10 * time.Minute + leafAckPerLeafBudget = 20 * time.Millisecond // Generous budget for tree building overhead +) + +func leafAckTimeout(count uint64) time.Duration { + // Calculate timeout with per-leaf budget plus a base overhead + baseOverhead := 30 * time.Second + timeout := baseOverhead + time.Duration(count)*leafAckPerLeafBudget + + if timeout < leafAckMinTimeout { + return leafAckMinTimeout + } + if timeout > leafAckMaxTimeout { + return leafAckMaxTimeout + } + return timeout +} + +func shouldUseRawSync(phaseSet protobufs.HypergraphPhaseSet) bool { + return phaseSet == protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS +} + +// rawShardSync performs a full raw sync of all leaves from server to client. +// This iterates directly over the database, bypassing in-memory tree caching +// to ensure all leaves are sent even if the in-memory tree is stale. 
+func (s *streamManager) rawShardSync( + shardKey tries.ShardKey, + phaseSet protobufs.HypergraphPhaseSet, + incomingLeaves <-chan *protobufs.HypergraphComparison, +) error { + shardHex := hex.EncodeToString(shardKey.L2[:]) + s.logger.Info( + "SERVER: starting raw shard sync (direct DB iteration)", + zap.String("shard_key", shardHex), + ) + start := time.Now() + + // Determine set and phase type strings + setType := string(hypergraph.VertexAtomType) + phaseType := string(hypergraph.AddsPhaseType) + switch phaseSet { + case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_ADDS: + setType = string(hypergraph.VertexAtomType) + phaseType = string(hypergraph.AddsPhaseType) + case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_VERTEX_REMOVES: + setType = string(hypergraph.VertexAtomType) + phaseType = string(hypergraph.RemovesPhaseType) + case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_ADDS: + setType = string(hypergraph.HyperedgeAtomType) + phaseType = string(hypergraph.AddsPhaseType) + case protobufs.HypergraphPhaseSet_HYPERGRAPH_PHASE_SET_HYPEREDGE_REMOVES: + setType = string(hypergraph.HyperedgeAtomType) + phaseType = string(hypergraph.RemovesPhaseType) + } + + // Get raw leaf iterator from the database + iter, err := s.hypergraphStore.IterateRawLeaves(setType, phaseType, shardKey) + if err != nil { + s.logger.Error( + "SERVER: failed to create raw leaf iterator", + zap.String("shard_key", shardHex), + zap.Error(err), + ) + return errors.Wrap(err, "raw shard sync") + } + defer iter.Close() + + // First pass: count leaves + var count uint64 + for valid := iter.First(); valid; valid = iter.Next() { + leaf, err := iter.Leaf() + if err != nil { + // Skip non-leaf nodes (branches) + continue + } + if leaf != nil { + count++ + } + } + + s.logger.Info( + "SERVER: raw sync sending metadata", + zap.String("shard_key", shardHex), + zap.Uint64("leaf_count", count), + ) + + // Send metadata with leaf count + if err := s.stream.Send(&protobufs.HypergraphComparison{ + Payload: &protobufs.HypergraphComparison_Metadata{ + Metadata: &protobufs.HypersyncMetadata{Leaves: count}, + }, + }); err != nil { + return errors.Wrap(err, "raw shard sync: send metadata") + } + + // Create new iterator for sending (previous one is exhausted) + iter.Close() + iter, err = s.hypergraphStore.IterateRawLeaves(setType, phaseType, shardKey) + if err != nil { + return errors.Wrap(err, "raw shard sync: recreate iterator") + } + defer iter.Close() + + // Second pass: send leaves + var sent uint64 + for valid := iter.First(); valid; valid = iter.Next() { + select { + case <-s.ctx.Done(): + return s.ctx.Err() + default: + } + + leaf, err := iter.Leaf() + if err != nil { + // Skip non-leaf nodes + continue + } + if leaf == nil { + continue + } + + update := &protobufs.LeafData{ + Key: leaf.Key, + Value: leaf.Value, + HashTarget: leaf.HashTarget, + Size: leaf.Size, + UnderlyingData: leaf.UnderlyingData, + } + + msg := &protobufs.HypergraphComparison{ + Payload: &protobufs.HypergraphComparison_LeafData{ + LeafData: update, + }, + } + + if err := s.stream.Send(msg); err != nil { + return errors.Wrap(err, "raw shard sync: send leaf") + } + + sent++ + if sent%1000 == 0 { + s.logger.Debug( + "SERVER: raw sync progress", + zap.Uint64("sent", sent), + zap.Uint64("total", count), + ) + } + } + + s.logger.Info( + "SERVER: raw sync sent all leaves, waiting for ack", + zap.String("shard_key", shardHex), + zap.Uint64("sent", sent), + ) + + // Wait for acknowledgment + timeoutTimer := time.NewTimer(leafAckTimeout(count)) 
+ defer timeoutTimer.Stop() + + select { + case <-s.ctx.Done(): + return errors.Wrap(s.ctx.Err(), "raw shard sync: wait ack") + case msg, ok := <-incomingLeaves: + if !ok { + return errors.Wrap(errors.New("channel closed"), "raw shard sync: wait ack") + } + meta := msg.GetMetadata() + if meta == nil { + return errors.Wrap(errors.New("expected metadata ack"), "raw shard sync: wait ack") + } + if meta.Leaves != count { + return errors.Wrap( + fmt.Errorf("ack mismatch: expected %d, got %d", count, meta.Leaves), + "raw shard sync: wait ack", + ) + } + case <-timeoutTimer.C: + return errors.Wrap(errors.New("timeout waiting for ack"), "raw shard sync") + } + + s.logger.Info( + "SERVER: raw shard sync completed", + zap.String("shard_key", shardHex), + zap.Uint64("leaves_sent", sent), + zap.Duration("duration", time.Since(start)), + ) + return nil +} + +// receiveRawShardSync receives a full raw sync of all leaves from server. +// It uses tree insertion to properly build the tree structure on the client. +func (s *streamManager) receiveRawShardSync( + incomingLeaves <-chan *protobufs.HypergraphComparison, +) error { + start := time.Now() + s.logger.Info("CLIENT: starting receiveRawShardSync") + + expectedLeaves, err := s.awaitRawLeafMetadata(incomingLeaves) + if err != nil { + s.logger.Error("CLIENT: failed to receive metadata", zap.Error(err)) + return err + } + + s.logger.Info( + "CLIENT: received metadata", + zap.Uint64("expected_leaves", expectedLeaves), + ) + + var txn tries.TreeBackingStoreTransaction + var processed uint64 + for processed < expectedLeaves { + if processed%100 == 0 { + if txn != nil { + if err := txn.Commit(); err != nil { + return errors.Wrap(err, "receive raw shard sync") + } + } + txn, err = s.hypergraphStore.NewTransaction(false) + if err != nil { + return errors.Wrap(err, "receive raw shard sync") + } + } + + leafMsg, err := s.awaitLeafData(incomingLeaves) + if err != nil { + if txn != nil { + txn.Abort() + } + s.logger.Error( + "CLIENT: failed to receive leaf", + zap.Uint64("processed", processed), + zap.Uint64("expected", expectedLeaves), + zap.Error(err), + ) + return err + } + + // Deserialize the atom from the raw value + theirs := AtomFromBytes(leafMsg.Value) + if theirs == nil { + if txn != nil { + txn.Abort() + } + return errors.Wrap( + errors.New("invalid atom"), + "receive raw shard sync", + ) + } + + // Persist underlying vertex tree data if present + if len(leafMsg.UnderlyingData) > 0 { + if saver, ok := s.hypergraphStore.(rawVertexSaver); ok { + if err := saver.SaveVertexTreeRaw( + txn, + leafMsg.Key, + leafMsg.UnderlyingData, + ); err != nil { + txn.Abort() + return errors.Wrap(err, "receive raw shard sync: save vertex tree") + } + } + } + + // Use Add to properly build tree structure + if err := s.localSet.Add(txn, theirs); err != nil { + txn.Abort() + return errors.Wrap(err, "receive raw shard sync: add atom") + } + + processed++ + if processed%1000 == 0 { + s.logger.Debug( + "CLIENT: raw sync progress", + zap.Uint64("processed", processed), + zap.Uint64("expected", expectedLeaves), + ) + } + } + + if txn != nil { + if err := txn.Commit(); err != nil { + return errors.Wrap(err, "receive raw shard sync") + } + } + + // Send acknowledgment + if err := s.sendLeafMetadata(expectedLeaves); err != nil { + return errors.Wrap(err, "receive raw shard sync") + } + + s.logger.Info( + "CLIENT: raw shard sync completed", + zap.Uint64("leaves_received", expectedLeaves), + zap.Duration("duration", time.Since(start)), + ) + return nil +} + +func (s *streamManager) 
awaitRawLeafMetadata( + incomingLeaves <-chan *protobufs.HypergraphComparison, +) (uint64, error) { + s.logger.Debug("CLIENT: awaitRawLeafMetadata waiting...") + select { + case <-s.ctx.Done(): + return 0, errors.Wrap( + errors.New("context canceled"), + "await raw leaf metadata", + ) + case msg, ok := <-incomingLeaves: + if !ok { + s.logger.Error("CLIENT: incomingLeaves channel closed") + return 0, errors.Wrap( + errors.New("channel closed"), + "await raw leaf metadata", + ) + } + meta := msg.GetMetadata() + if meta == nil { + s.logger.Error( + "CLIENT: received non-metadata message while waiting for metadata", + zap.String("payload_type", fmt.Sprintf("%T", msg.Payload)), + ) + return 0, errors.Wrap( + errors.New("invalid message: expected metadata"), + "await raw leaf metadata", + ) + } + s.logger.Debug( + "CLIENT: received metadata", + zap.Uint64("leaves", meta.Leaves), + ) + return meta.Leaves, nil + case <-time.After(leafAckTimeout(1)): + s.logger.Error("CLIENT: timeout waiting for metadata") + return 0, errors.Wrap( + errors.New("timed out waiting for metadata"), + "await raw leaf metadata", + ) + } +} + +func (s *streamManager) awaitLeafData( + incomingLeaves <-chan *protobufs.HypergraphComparison, +) (*protobufs.LeafData, error) { + select { + case <-s.ctx.Done(): + return nil, errors.Wrap( + errors.New("context canceled"), + "await leaf data", + ) + case msg, ok := <-incomingLeaves: + if !ok { + return nil, errors.Wrap( + errors.New("channel closed"), + "await leaf data", + ) + } + if leaf := msg.GetLeafData(); leaf != nil { + return leaf, nil + } + return nil, errors.Wrap( + errors.New("invalid message: expected leaf data"), + "await leaf data", + ) + case <-time.After(leafAckTimeout(1)): + return nil, errors.Wrap( + errors.New("timed out waiting for leaf data"), + "await leaf data", + ) + } +} + +func (s *streamManager) sendLeafMetadata(leaves uint64) error { + s.logger.Debug("sending leaf metadata ack", zap.Uint64("leaves", leaves)) + return s.stream.Send(&protobufs.HypergraphComparison{ + Payload: &protobufs.HypergraphComparison_Metadata{ + Metadata: &protobufs.HypersyncMetadata{Leaves: leaves}, + }, + }) } // sendLeafData builds a LeafData message (with the full leaf data) for the @@ -468,7 +919,20 @@ type streamManager struct { func (s *streamManager) sendLeafData( path []int32, incomingLeaves <-chan *protobufs.HypergraphComparison, -) error { +) (err error) { + start := time.Now() + pathHex := hex.EncodeToString(packPath(path)) + var count uint64 + s.logger.Debug("send leaf data start", zap.String("path", pathHex)) + defer func() { + s.logger.Debug( + "send leaf data finished", + zap.String("path", pathHex), + zap.Uint64("leaves_sent", count), + zap.Duration("duration", time.Since(start)), + zap.Error(err), + ) + }() send := func(leaf *tries.LazyVectorCommitmentLeafNode) error { update := &protobufs.LeafData{ Key: leaf.Key, @@ -476,13 +940,12 @@ func (s *streamManager) sendLeafData( HashTarget: leaf.HashTarget, Size: leaf.Size.FillBytes(make([]byte, 32)), } - tree, err := s.hypergraphStore.LoadVertexTree(leaf.Key) - if err == nil { - b, err := tries.SerializeNonLazyTree(tree) - if err != nil { - return errors.Wrap(err, "send leaf data") - } - update.UnderlyingData = b + data, err := s.getSerializedLeaf(leaf.Key) + if err != nil { + return errors.Wrap(err, "send leaf data") + } + if len(data) != 0 { + update.UnderlyingData = data } msg := &protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_LeafData{ @@ -530,6 +993,12 @@ func (s *streamManager) 
sendLeafData( } if node == nil { + s.logger.Warn( + "SERVER: node is nil at path, sending 0 leaves", + zap.String("path", pathHex), + zap.Bool("tree_nil", s.localTree == nil), + zap.Bool("root_nil", s.localTree != nil && s.localTree.Root == nil), + ) if err := s.stream.Send(&protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_Metadata{ Metadata: &protobufs.HypersyncMetadata{Leaves: 0}, @@ -541,7 +1010,16 @@ func (s *streamManager) sendLeafData( } leaf, ok := node.(*tries.LazyVectorCommitmentLeafNode) - count := uint64(0) + count = uint64(0) + + // Debug: log the node type + s.logger.Info( + "SERVER: node type at path", + zap.String("path", pathHex), + zap.Bool("is_leaf", ok), + zap.String("node_type", fmt.Sprintf("%T", node)), + ) + if !ok { children := tries.GetAllLeaves( s.localTree.SetType, @@ -555,6 +1033,12 @@ func (s *streamManager) sendLeafData( } count++ } + s.logger.Info( + "SERVER: sending metadata with leaf count (branch)", + zap.String("path", pathHex), + zap.Uint64("leaf_count", count), + zap.Int("children_slice_len", len(children)), + ) if err := s.stream.Send(&protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_Metadata{ Metadata: &protobufs.HypersyncMetadata{Leaves: count}, @@ -573,6 +1057,11 @@ func (s *streamManager) sendLeafData( } } else { count = 1 + s.logger.Info( + "SERVER: root is a single leaf, sending 1 leaf", + zap.String("path", pathHex), + zap.String("leaf_key", hex.EncodeToString(leaf.Key)), + ) if err := s.stream.Send(&protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_Metadata{ Metadata: &protobufs.HypersyncMetadata{Leaves: count}, @@ -585,6 +1074,9 @@ func (s *streamManager) sendLeafData( } } + timeoutTimer := time.NewTimer(leafAckTimeout(count)) + defer timeoutTimer.Stop() + select { case <-s.ctx.Done(): return errors.Wrap( @@ -615,7 +1107,7 @@ func (s *streamManager) sendLeafData( errors.New("invalid message"), "send leaf data", ) - case <-time.After(30 * time.Second): + case <-timeoutTimer.C: return errors.Wrap( errors.New("timed out"), "send leaf data", @@ -623,6 +1115,75 @@ func (s *streamManager) sendLeafData( } } +func (s *streamManager) getSerializedLeaf(key []byte) ([]byte, error) { + if s.snapshot != nil { + if data, ok := s.snapshot.getLeafData(key); ok { + return data, nil + } + if s.snapshot.isLeafMiss(key) { + return nil, nil + } + } + + type rawVertexLoader interface { + LoadVertexTreeRaw(id []byte) ([]byte, error) + } + + if loader, ok := s.hypergraphStore.(rawVertexLoader); ok { + data, err := loader.LoadVertexTreeRaw(key) + if err != nil { + if s.snapshot != nil { + s.snapshot.markLeafMiss(key) + } + return nil, nil + } + if len(data) != 0 { + if s.snapshot != nil { + s.snapshot.storeLeafData(key, data) + } + return data, nil + } + } + + tree, err := s.hypergraphStore.LoadVertexTree(key) + if err != nil { + if s.snapshot != nil { + s.snapshot.markLeafMiss(key) + } + return nil, nil + } + data, err := tries.SerializeNonLazyTree(tree) + if err != nil { + return nil, err + } + + if s.snapshot != nil { + s.snapshot.storeLeafData(key, data) + } + return data, nil +} + +func (s *streamManager) getBranchInfo( + path []int32, +) (*protobufs.HypergraphComparisonResponse, error) { + if s.snapshot != nil { + if resp, ok := s.snapshot.getBranchInfo(path); ok { + return resp, nil + } + } + + resp, err := getBranchInfoFromTree(s.logger, s.localTree, path) + if err != nil { + return nil, err + } + + if s.snapshot != nil { + s.snapshot.storeBranchInfo(path, resp) + } + + return resp, nil +} + 
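+// The type behind s.snapshot is not shown in this section; the calls above
+// imply a per-session memo cache that also records negative lookups
+// (markLeafMiss) so repeated misses for the same key skip the store
+// entirely. A minimal sketch of that shape, with hypothetical names:
+//
+//	type leafCache struct {
+//		mu     sync.RWMutex
+//		data   map[string][]byte
+//		misses map[string]struct{}
+//	}
+//
+//	func newLeafCache() *leafCache {
+//		return &leafCache{
+//			data:   map[string][]byte{},
+//			misses: map[string]struct{}{},
+//		}
+//	}
+//
+//	func (c *leafCache) getLeafData(key []byte) ([]byte, bool) {
+//		c.mu.RLock()
+//		defer c.mu.RUnlock()
+//		d, ok := c.data[string(key)]
+//		return d, ok
+//	}
+//
+//	func (c *leafCache) markLeafMiss(key []byte) {
+//		c.mu.Lock()
+//		defer c.mu.Unlock()
+//		c.misses[string(key)] = struct{}{}
+//	}
+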
// getNodeAtPath traverses the tree along the provided nibble path. It returns // the node found (or nil if not found). The depth argument is used for internal // recursion. @@ -845,9 +1406,20 @@ func (s *streamManager) queryNext( incomingResponses <-chan *protobufs.HypergraphComparisonResponse, path []int32, ) ( - *protobufs.HypergraphComparisonResponse, - error, + resp *protobufs.HypergraphComparisonResponse, + err error, ) { + start := time.Now() + pathHex := hex.EncodeToString(packPath(path)) + s.logger.Debug("query next start", zap.String("path", pathHex)) + defer func() { + s.logger.Debug( + "query next finished", + zap.String("path", pathHex), + zap.Duration("duration", time.Since(start)), + zap.Error(err), + ) + }() if err := s.stream.Send(&protobufs.HypergraphComparison{ Payload: &protobufs.HypergraphComparison_Query{ Query: &protobufs.HypergraphComparisonQuery{ @@ -865,13 +1437,14 @@ func (s *streamManager) queryNext( errors.New("context canceled"), "handle query", ) - case resp, ok := <-incomingResponses: + case r, ok := <-incomingResponses: if !ok { return nil, errors.Wrap( errors.New("channel closed"), "handle query", ) } + resp = r return resp, nil case <-time.After(30 * time.Second): return nil, errors.Wrap( @@ -883,8 +1456,18 @@ func (s *streamManager) queryNext( func (s *streamManager) handleLeafData( incomingLeaves <-chan *protobufs.HypergraphComparison, -) error { - expectedLeaves := uint64(0) +) (err error) { + start := time.Now() + var expectedLeaves uint64 + s.logger.Debug("handle leaf data start") + defer func() { + s.logger.Debug( + "handle leaf data finished", + zap.Uint64("leaves_expected", expectedLeaves), + zap.Duration("duration", time.Since(start)), + zap.Error(err), + ) + }() select { case <-s.ctx.Done(): return errors.Wrap( @@ -918,7 +1501,6 @@ func (s *streamManager) handleLeafData( // s.logger.Info("expecting leaves", zap.Uint64("count", expectedLeaves)) var txn tries.TreeBackingStoreTransaction - var err error for i := uint64(0); i < expectedLeaves; i++ { if i%100 == 0 { if txn != nil { @@ -968,30 +1550,9 @@ func (s *streamManager) handleLeafData( // ) theirs := AtomFromBytes(remoteUpdate.Value) - if len(remoteUpdate.UnderlyingData) != 0 { - tree, err := tries.DeserializeNonLazyTree(remoteUpdate.UnderlyingData) - if err != nil { - s.logger.Error("server returned invalid tree", zap.Error(err)) - txn.Abort() - return err - } - - err = s.localSet.ValidateTree( - remoteUpdate.Key, - remoteUpdate.Value, - tree, - ) - if err != nil { - s.logger.Error("server returned invalid tree", zap.Error(err)) - txn.Abort() - return err - } - - err = s.hypergraphStore.SaveVertexTree(txn, remoteUpdate.Key, tree) - if err != nil { - txn.Abort() - return err - } + if err := s.persistLeafTree(txn, remoteUpdate); err != nil { + txn.Abort() + return err } err := s.localSet.Add(txn, theirs) @@ -1030,13 +1591,72 @@ func (s *streamManager) handleLeafData( return nil } +func (s *streamManager) persistLeafTree( + txn tries.TreeBackingStoreTransaction, + update *protobufs.LeafData, +) error { + if len(update.UnderlyingData) == 0 { + return nil + } + + needsValidation := s.requiresTreeValidation() + _, canSaveRaw := s.hypergraphStore.(rawVertexSaver) + + var tree *tries.VectorCommitmentTree + var err error + if needsValidation || !canSaveRaw { + tree, err = tries.DeserializeNonLazyTree(update.UnderlyingData) + if err != nil { + s.logger.Error("server returned invalid tree", zap.Error(err)) + return err + } + } + + if needsValidation { + if err := s.localSet.ValidateTree( + update.Key, + 
update.Value, + tree, + ); err != nil { + s.logger.Error("server returned invalid tree", zap.Error(err)) + return err + } + } + + if saver, ok := s.hypergraphStore.(rawVertexSaver); ok { + buf := make([]byte, len(update.UnderlyingData)) + copy(buf, update.UnderlyingData) + return saver.SaveVertexTreeRaw(txn, update.Key, buf) + } + + return s.hypergraphStore.SaveVertexTree(txn, update.Key, tree) +} + +func (s *streamManager) requiresTreeValidation() bool { + if typed, ok := s.localSet.(*idSet); ok { + return typed.validator != nil + } + return false +} + func (s *streamManager) handleQueryNext( incomingQueries <-chan *protobufs.HypergraphComparisonQuery, path []int32, ) ( - *protobufs.HypergraphComparisonResponse, - error, + branch *protobufs.HypergraphComparisonResponse, + err error, ) { + start := time.Now() + pathHex := hex.EncodeToString(packPath(path)) + s.logger.Debug("handle query next start", zap.String("path", pathHex)) + defer func() { + s.logger.Debug( + "handle query next finished", + zap.String("path", pathHex), + zap.Duration("duration", time.Since(start)), + zap.Error(err), + ) + }() select { case <-s.ctx.Done(): return nil, errors.Wrap( @@ -1058,9 +1678,9 @@ func (s *streamManager) handleQueryNext( ) } - branchInfo, err := getBranchInfoFromTree(s.logger, s.localTree, path) - if err != nil { - return nil, errors.Wrap(err, "handle query next") + branchInfo, berr := s.getBranchInfo(path) + if berr != nil { + return nil, errors.Wrap(berr, "handle query next") } resp := &protobufs.HypergraphComparison{ @@ -1073,7 +1693,8 @@ func (s *streamManager) handleQueryNext( return nil, errors.Wrap(err, "handle query next") } - return branchInfo, nil + branch = branchInfo + return branch, nil case <-time.After(30 * time.Second): return nil, errors.Wrap( errors.New("timed out"), @@ -1086,11 +1707,22 @@ func (s *streamManager) descendIndex( incomingResponses <-chan *protobufs.HypergraphComparisonResponse, path []int32, ) ( - *protobufs.HypergraphComparisonResponse, - *protobufs.HypergraphComparisonResponse, - error, + local *protobufs.HypergraphComparisonResponse, + remote *protobufs.HypergraphComparisonResponse, + err error, ) { - branchInfo, err := getBranchInfoFromTree(s.logger, s.localTree, path) + start := time.Now() + pathHex := hex.EncodeToString(packPath(path)) + s.logger.Debug("descend index start", zap.String("path", pathHex)) + defer func() { + s.logger.Debug( + "descend index finished", + zap.String("path", pathHex), + zap.Duration("duration", time.Since(start)), + zap.Error(err), + ) + }() + branchInfo, err := s.getBranchInfo(path) if err != nil { return nil, nil, errors.Wrap(err, "descend index") } @@ -1111,7 +1743,7 @@ func (s *streamManager) descendIndex( errors.New("context canceled"), "handle query next", ) - case resp, ok := <-incomingResponses: + case r, ok := <-incomingResponses: if !ok { return nil, nil, errors.Wrap( errors.New("channel closed"), @@ -1119,18 +1751,20 @@ func (s *streamManager) descendIndex( ) } - if slices.Compare(branchInfo.Path, resp.Path) != 0 { + if slices.Compare(branchInfo.Path, r.Path) != 0 { return nil, nil, errors.Wrap( fmt.Errorf( "invalid path received: %v, expected: %v", - resp.Path, + r.Path, branchInfo.Path, ), "descend index", ) } - return branchInfo, resp, nil + local = branchInfo + remote = r + return local, remote, nil case <-time.After(30 * time.Second): return nil, nil, errors.Wrap( errors.New("timed out"), @@ -1155,7 +1789,22 @@ func (s *streamManager) walk( incomingResponses <-chan *protobufs.HypergraphComparisonResponse, init bool, 
isServer bool, + shardKey tries.ShardKey, + phaseSet protobufs.HypergraphPhaseSet, ) error { + // Check if we should use raw sync mode for this phase set + if init && shouldUseRawSync(phaseSet) { + s.logger.Info( + "walk: using raw sync mode", + zap.Bool("is_server", isServer), + zap.Int("phase_set", int(phaseSet)), + ) + if isServer { + return s.rawShardSync(shardKey, phaseSet, incomingLeaves) + } + return s.receiveRawShardSync(incomingLeaves) + } + select { case <-s.ctx.Done(): return s.ctx.Err() @@ -1249,6 +1898,8 @@ func (s *streamManager) walk( incomingResponses, false, isServer, + shardKey, + phaseSet, ) } else { // s.logger.Debug("remote prefix longer, traversing local to path", pathString) @@ -1324,6 +1975,8 @@ func (s *streamManager) walk( incomingResponses, false, isServer, + shardKey, + phaseSet, ) } } else { @@ -1411,6 +2064,8 @@ func (s *streamManager) walk( incomingResponses, false, isServer, + shardKey, + phaseSet, ); err != nil { return errors.Wrap(err, "walk") } @@ -1447,6 +2102,7 @@ func (hg *HypergraphCRDT) syncTreeServer( snapshotStore tries.TreeBackingStore, snapshotRoot []byte, sessionLogger *zap.Logger, + handle *snapshotHandle, ) error { logger := sessionLogger if logger == nil { @@ -1549,6 +2205,7 @@ func (hg *HypergraphCRDT) syncTreeServer( cancel() close(incomingQueriesIn) close(incomingResponsesIn) + close(incomingLeavesIn) return } if err != nil { @@ -1556,6 +2213,7 @@ func (hg *HypergraphCRDT) syncTreeServer( cancel() close(incomingQueriesIn) close(incomingResponsesIn) + close(incomingLeavesIn) return } if msg == nil { @@ -1567,6 +2225,7 @@ func (hg *HypergraphCRDT) syncTreeServer( cancel() close(incomingQueriesIn) close(incomingResponsesIn) + close(incomingLeavesIn) return case *protobufs.HypergraphComparison_Metadata: incomingLeavesIn <- msg @@ -1585,7 +2244,9 @@ func (hg *HypergraphCRDT) syncTreeServer( stream: stream, hypergraphStore: snapshotStore, localTree: idSet.GetTree(), + localSet: idSet, lastSent: time.Now(), + snapshot: handle, } var wg sync.WaitGroup wg.Add(1) @@ -1600,6 +2261,8 @@ func (hg *HypergraphCRDT) syncTreeServer( incomingResponsesOut, true, true, + shardKey, + query.PhaseSet, ) if err != nil { logger.Error("error while syncing", zap.Error(err)) diff --git a/node/consensus/global/consensus_leader_provider.go b/node/consensus/global/consensus_leader_provider.go index 67e7b69..c562520 100644 --- a/node/consensus/global/consensus_leader_provider.go +++ b/node/consensus/global/consensus_leader_provider.go @@ -79,10 +79,18 @@ func (p *GlobalLeaderProvider) ProveNextState( ) } - prior, err := p.engine.clockStore.GetGlobalClockFrameCandidate( - latestQC.FrameNumber, - []byte(priorState), - ) + var prior *protobufs.GlobalFrame + var err error + if latestQC.FrameNumber == 0 { + prior, err = p.engine.clockStore.GetGlobalClockFrame( + latestQC.FrameNumber, + ) + } else { + prior, err = p.engine.clockStore.GetGlobalClockFrameCandidate( + latestQC.FrameNumber, + []byte(priorState), + ) + } if err != nil { frameProvingTotal.WithLabelValues("error").Inc() return nil, models.NewNoVoteErrorf("could not collect: %+w", err) diff --git a/node/consensus/global/consensus_liveness_provider.go b/node/consensus/global/consensus_liveness_provider.go index 5ecd698..75a9258 100644 --- a/node/consensus/global/consensus_liveness_provider.go +++ b/node/consensus/global/consensus_liveness_provider.go @@ -1,18 +1,13 @@ package global import ( - "bytes" "context" - "math/big" - "slices" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" 
"go.uber.org/zap" - "golang.org/x/crypto/sha3" keyedaggregator "source.quilibrium.com/quilibrium/monorepo/node/keyedaggregator" "source.quilibrium.com/quilibrium/monorepo/protobufs" - "source.quilibrium.com/quilibrium/monorepo/types/tries" ) // GlobalLivenessProvider implements LivenessProvider @@ -115,82 +110,10 @@ func (p *GlobalLivenessProvider) Collect( ) } - commitments := make([]*tries.VectorCommitmentTree, 256) - for i := range 256 { - commitments[i] = &tries.VectorCommitmentTree{} - } - - proverRoot := make([]byte, 64) - - commitSet, err := p.engine.hypergraph.Commit(frameNumber) + commitmentHash, err := p.engine.rebuildShardCommitments(frameNumber, rank) if err != nil { - p.engine.logger.Error( - "could not commit", - zap.Error(err), - ) return GlobalCollectedCommitments{}, errors.Wrap(err, "collect") } - collected := 0 - - if err := p.engine.rebuildAppShardCache(rank); err != nil { - p.engine.logger.Warn( - "could not rebuild app shard cache", - zap.Uint64("rank", rank), - zap.Error(err), - ) - } - - // The poseidon hash's field is < 0x3fff...ffff, so we use the upper two bits - // to fold the four hypergraph phase/sets into the three different tree - // partitions the L1 key designates - for sk, s := range commitSet { - if !bytes.Equal(sk.L1[:], []byte{0x00, 0x00, 0x00}) { - collected++ - - for phaseSet := 0; phaseSet < 4; phaseSet++ { - commit := s[phaseSet] - foldedShardKey := make([]byte, 32) - copy(foldedShardKey, sk.L2[:]) - - // 0 -> 0b00 -> 0b00000000 -> 0x00 - // 1 -> 0b01 -> 0b01000000 -> 0x40 - // 2 -> 0b10 -> 0b10000000 -> 0x80 - // 3 -> 0b11 -> 0b11000000 -> 0xC0 - foldedShardKey[0] |= byte(phaseSet << 6) - for l1Idx := 0; l1Idx < 3; l1Idx++ { - err := commitments[sk.L1[l1Idx]].Insert( - foldedShardKey, - commit, - nil, - big.NewInt(int64(len(commit))), - ) - if err != nil { - return GlobalCollectedCommitments{}, errors.Wrap(err, "collect") - } - } - } - } else { - // Prover set is strictly vertex adds, so we simply take the first. 
- proverRoot = s[0] - } - } - - shardCommitments := make([][]byte, 256) - - for i := 0; i < 256; i++ { - shardCommitments[i] = commitments[i].Commit(p.engine.inclusionProver, false) - } - - preimage := slices.Concat( - slices.Concat(shardCommitments...), - proverRoot, - ) - - commitmentHash := sha3.Sum256(preimage) - - p.engine.shardCommitments = shardCommitments - p.engine.proverRoot = proverRoot - p.engine.commitmentHash = commitmentHash[:] // Store the accepted messages as canonical bytes for inclusion in the frame collectedMsgs := make([][]byte, 0, len(acceptedMessages)) @@ -199,12 +122,9 @@ func (p *GlobalLivenessProvider) Collect( } p.engine.collectedMessages = collectedMsgs - // Update metrics - shardCommitmentsCollected.Set(float64(collected)) - return GlobalCollectedCommitments{ frameNumber: frameNumber, - commitmentHash: commitmentHash[:], + commitmentHash: commitmentHash, prover: p.engine.getProverAddress(), }, nil } diff --git a/node/consensus/global/global_consensus_engine.go b/node/consensus/global/global_consensus_engine.go index bf80100..4305204 100644 --- a/node/consensus/global/global_consensus_engine.go +++ b/node/consensus/global/global_consensus_engine.go @@ -605,39 +605,54 @@ func NewGlobalConsensusEngine( latest.FinalizedRank, ) if err != nil { - establishGenesis() + panic(err) } else { - frame, err := engine.clockStore.GetGlobalClockFrame( - qc.GetFrameNumber(), - ) - if err != nil { + if qc.GetFrameNumber() == 0 { establishGenesis() } else { - parentFrame, err := engine.clockStore.GetGlobalClockFrame( - qc.GetFrameNumber() - 1, + frame, err := engine.clockStore.GetGlobalClockFrameCandidate( + qc.GetFrameNumber(), + qc.Selector, ) if err != nil { - establishGenesis() + panic(err) } else { - parentQC, err := engine.clockStore.GetQuorumCertificate( - nil, - parentFrame.GetRank(), + if _, rebuildErr := engine.rebuildShardCommitments( + frame.Header.FrameNumber+1, + frame.Header.Rank+1, + ); rebuildErr != nil { + logger.Warn( + "could not initialize shard commitments from latest frame", + zap.Error(rebuildErr), + ) + } + parentFrame, err := engine.clockStore.GetGlobalClockFrameCandidate( + qc.GetFrameNumber()-1, + frame.Header.ParentSelector, ) if err != nil { - establishGenesis() + panic(err) } else { - state = &models.CertifiedState[*protobufs.GlobalFrame]{ - State: &models.State[*protobufs.GlobalFrame]{ - Rank: frame.GetRank(), - Identifier: frame.Identity(), - ProposerID: frame.Source(), - ParentQuorumCertificate: parentQC, - Timestamp: frame.GetTimestamp(), - State: &frame, - }, - CertifyingQuorumCertificate: qc, + parentQC, err := engine.clockStore.GetQuorumCertificate( + nil, + parentFrame.GetRank(), + ) + if err != nil { + panic(err) + } else { + state = &models.CertifiedState[*protobufs.GlobalFrame]{ + State: &models.State[*protobufs.GlobalFrame]{ + Rank: frame.GetRank(), + Identifier: frame.Identity(), + ProposerID: frame.Source(), + ParentQuorumCertificate: parentQC, + Timestamp: frame.GetTimestamp(), + State: &frame, + }, + CertifyingQuorumCertificate: qc, + } + pending = engine.getPendingProposals(frame.Header.FrameNumber) } - pending = engine.getPendingProposals(frame.Header.FrameNumber) } } } @@ -3701,6 +3716,7 @@ func (e *GlobalConsensusEngine) OnRankChange(oldRank uint64, newRank uint64) { prior, err := e.clockStore.GetLatestGlobalClockFrame() if err != nil { + e.logger.Error("new rank, no latest global clock frame") frameProvingTotal.WithLabelValues("error").Inc() return } @@ -3714,6 +3730,79 @@ func (e *GlobalConsensusEngine) OnRankChange(oldRank 
uint64, newRank uint64) { } } +func (e *GlobalConsensusEngine) rebuildShardCommitments( + frameNumber uint64, + rank uint64, +) ([]byte, error) { + commitments := make([]*tries.VectorCommitmentTree, 256) + for i := range commitments { + commitments[i] = &tries.VectorCommitmentTree{} + } + + commitSet, err := e.hypergraph.Commit(frameNumber) + if err != nil { + e.logger.Error("could not commit", zap.Error(err)) + return nil, errors.Wrap(err, "rebuild shard commitments") + } + + if err := e.rebuildAppShardCache(rank); err != nil { + e.logger.Warn( + "could not rebuild app shard cache", + zap.Uint64("rank", rank), + zap.Error(err), + ) + } + + proverRoot := make([]byte, 64) + collected := 0 + + for sk, s := range commitSet { + if !bytes.Equal(sk.L1[:], []byte{0x00, 0x00, 0x00}) { + collected++ + + for phaseSet := 0; phaseSet < 4; phaseSet++ { + commit := s[phaseSet] + foldedShardKey := make([]byte, 32) + copy(foldedShardKey, sk.L2[:]) + + foldedShardKey[0] |= byte(phaseSet << 6) + for l1Idx := 0; l1Idx < 3; l1Idx++ { + if err := commitments[sk.L1[l1Idx]].Insert( + foldedShardKey, + commit, + nil, + big.NewInt(int64(len(commit))), + ); err != nil { + return nil, errors.Wrap(err, "rebuild shard commitments") + } + } + } + } else { + proverRoot = s[0] + } + } + + shardCommitments := make([][]byte, 256) + for i := 0; i < 256; i++ { + shardCommitments[i] = commitments[i].Commit(e.inclusionProver, false) + } + + preimage := slices.Concat( + slices.Concat(shardCommitments...), + proverRoot, + ) + + commitmentHash := sha3.Sum256(preimage) + + e.shardCommitments = shardCommitments + e.proverRoot = proverRoot + e.commitmentHash = commitmentHash[:] + + shardCommitmentsCollected.Set(float64(collected)) + + return commitmentHash[:], nil +} + // OnReceiveProposal implements consensus.Consumer. 
func (e *GlobalConsensusEngine) OnReceiveProposal( currentRank uint64, @@ -3998,7 +4087,16 @@ func (e *GlobalConsensusEngine) getPendingProposals( *protobufs.GlobalFrame, *protobufs.ProposalVote, ] { - root, err := e.clockStore.GetGlobalClockFrame(frameNumber) + rootIter, err := e.clockStore.RangeGlobalClockFrameCandidates( + frameNumber, + frameNumber, + ) + if err != nil { + panic(err) + } + + rootIter.First() + root, err := rootIter.Value() if err != nil { panic(err) } diff --git a/node/consensus/global/message_processors.go b/node/consensus/global/message_processors.go index 5e0b38d..5971fbe 100644 --- a/node/consensus/global/message_processors.go +++ b/node/consensus/global/message_processors.go @@ -992,12 +992,15 @@ func (e *GlobalConsensusEngine) handleGlobalProposal( // if we have a parent, cache and move on if proposal.State.Header.FrameNumber != 0 { + _, sErr := e.clockStore.GetGlobalClockFrame( + proposal.State.Header.FrameNumber - 1, + ) // also check with persistence layer - _, err := e.clockStore.GetGlobalClockFrameCandidate( + _, cErr := e.clockStore.GetGlobalClockFrameCandidate( proposal.State.Header.FrameNumber-1, proposal.State.Header.ParentSelector, ) - if err != nil { + if sErr != nil && cErr != nil { e.logger.Debug( "parent frame not stored, requesting sync", zap.Uint64("rank", proposal.GetRank()), @@ -1207,6 +1210,23 @@ func (e *GlobalConsensusEngine) cacheProposal( e.proposalCache[frameNumber] = proposal e.proposalCacheMu.Unlock() + txn, err := e.clockStore.NewTransaction(false) + if err != nil { + e.logger.Error("could not create transaction", zap.Error(err)) + return + } + err = e.clockStore.PutGlobalClockFrameCandidate(proposal.State, txn) + if err != nil { + e.logger.Error("could not put global clock frame candidate", zap.Error(err)) + txn.Abort() + return + } + if err = txn.Commit(); err != nil { + e.logger.Error("could not commit transaction", zap.Error(err)) + txn.Abort() + return + } + e.logger.Debug( "cached out-of-order proposal", zap.Uint64("rank", proposal.GetRank()), @@ -1580,6 +1600,7 @@ func (e *GlobalConsensusEngine) handleVote(message *pb.Message) { } func (e *GlobalConsensusEngine) handleTimeoutState(message *pb.Message) { + e.logger.Debug("handling timeout state") // Skip our own messages if bytes.Equal(message.From, e.pubsub.GetPeerID()) { return diff --git a/node/consensus/global/message_validation.go b/node/consensus/global/message_validation.go index a22026c..0844d5d 100644 --- a/node/consensus/global/message_validation.go +++ b/node/consensus/global/message_validation.go @@ -2,6 +2,7 @@ package global import ( "encoding/binary" + "encoding/hex" "time" "github.com/libp2p/go-libp2p/core/peer" @@ -126,6 +127,14 @@ func (e *GlobalConsensusEngine) validateGlobalConsensusMessage( timeoutStateValidationTotal.WithLabelValues("reject").Inc() return tp2p.ValidationResultReject } + e.logger.Debug( + "received timeout", + zap.Uint64("rank", timeoutState.Vote.Rank), + zap.String( + "voter", + hex.EncodeToString([]byte(timeoutState.Vote.Identity())), + ), + ) // We should still accept votes for the past rank in case a peer needs it if e.currentRank > timeoutState.Vote.Rank+1 { diff --git a/node/consensus/provers/prover_registry.go b/node/consensus/provers/prover_registry.go index c6c5d3d..00cc4ed 100644 --- a/node/consensus/provers/prover_registry.go +++ b/node/consensus/provers/prover_registry.go @@ -1782,3 +1782,51 @@ func (r *ProverRegistry) GetAllActiveAppShardProvers() ( return result, nil } + +// GetProverShardSummaries implements ProverRegistry +func 
(r *ProverRegistry) GetProverShardSummaries() ( + []*consensus.ProverShardSummary, + error, +) { + r.mu.RLock() + defer r.mu.RUnlock() + + summaries := make([]*consensus.ProverShardSummary, 0, len(r.filterCache)) + for filterKey, provers := range r.filterCache { + if len(filterKey) == 0 || len(provers) == 0 { + continue + } + statusCounts := make(map[consensus.ProverStatus]int) + for _, info := range provers { + counted := false + for _, alloc := range info.Allocations { + if len(alloc.ConfirmationFilter) > 0 && + string(alloc.ConfirmationFilter) == filterKey { + statusCounts[alloc.Status]++ + counted = true + break + } + if len(alloc.RejectionFilter) > 0 && + string(alloc.RejectionFilter) == filterKey { + statusCounts[alloc.Status]++ + counted = true + break + } + } + if !counted { + statusCounts[info.Status]++ + } + } + summary := &consensus.ProverShardSummary{ + Filter: append([]byte(nil), filterKey...), + StatusCounts: statusCounts, + } + summaries = append(summaries, summary) + } + + sort.Slice(summaries, func(i, j int) bool { + return bytes.Compare(summaries[i].Filter, summaries[j].Filter) < 0 + }) + + return summaries, nil +} diff --git a/node/consensus/sync/sync_client.go b/node/consensus/sync/sync_client.go index 5047a60..dca0461 100644 --- a/node/consensus/sync/sync_client.go +++ b/node/consensus/sync/sync_client.go @@ -33,6 +33,7 @@ type GlobalSyncClient struct { blsConstructor crypto.BlsConstructor proposalProcessor ProposalProcessor[*protobufs.GlobalProposal] config *config.Config + validateFrames bool } func NewGlobalSyncClient( @@ -46,9 +47,14 @@ func NewGlobalSyncClient( config: config, blsConstructor: blsConstructor, proposalProcessor: proposalProcessor, + validateFrames: true, } } +func (g *GlobalSyncClient) SetValidationEnabled(enabled bool) { + g.validateFrames = enabled +} + func (g *GlobalSyncClient) Sync( ctx context.Context, logger *zap.Logger, @@ -97,12 +103,14 @@ func (g *GlobalSyncClient) Sync( logger.Debug("received empty response from peer") return ErrInvalidResponse } - if err := response.Proposal.Validate(); err != nil { - logger.Debug( - "received invalid response from peer", - zap.Error(err), - ) - return ErrInvalidResponse + if g.validateFrames { + if err := response.Proposal.Validate(); err != nil { + logger.Debug( + "received invalid response from peer", + zap.Error(err), + ) + return ErrInvalidResponse + } } if len(expectedIdentity) != 0 { @@ -138,15 +146,17 @@ func (g *GlobalSyncClient) Sync( ), ) - if _, err := g.frameProver.VerifyGlobalFrameHeader( - response.Proposal.State.Header, - g.blsConstructor, - ); err != nil { - logger.Debug( - "received invalid frame from peer", - zap.Error(err), - ) - return ErrInvalidResponse + if g.validateFrames { + if _, err := g.frameProver.VerifyGlobalFrameHeader( + response.Proposal.State.Header, + g.blsConstructor, + ); err != nil { + logger.Debug( + "received invalid frame from peer", + zap.Error(err), + ) + return ErrInvalidResponse + } } g.proposalProcessor.AddProposal(response.Proposal) diff --git a/node/execution/intrinsics/global/global_prover_join.go b/node/execution/intrinsics/global/global_prover_join.go index d9372df..31677b5 100644 --- a/node/execution/intrinsics/global/global_prover_join.go +++ b/node/execution/intrinsics/global/global_prover_join.go @@ -694,10 +694,30 @@ func (p *ProverJoin) Verify(frameNumber uint64) (valid bool, err error) { frame, err := p.frameStore.GetGlobalClockFrame(p.FrameNumber) if err != nil { - return false, errors.Wrap(errors.Wrap( - err, - fmt.Sprintf("frame number: %d", 
p.FrameNumber),
-		), "verify")
+		frames, err := p.frameStore.RangeGlobalClockFrameCandidates(
+			p.FrameNumber,
+			p.FrameNumber,
+		)
+		if err != nil {
+			return false, errors.Wrap(errors.Wrap(
+				err,
+				fmt.Sprintf("frame number: %d", p.FrameNumber),
+			), "verify")
+		}
+		if !frames.First() || !frames.Valid() {
+			frames.Close()
+			return false, errors.Wrap(errors.Wrap(
+				errors.New("not found"),
+				fmt.Sprintf("frame number: %d", p.FrameNumber),
+			), "verify")
+		}
+		// Read the value before closing the iterator; closing first would
+		// invalidate it.
+		frame, err = frames.Value()
+		frames.Close()
+		if err != nil {
+			return false, errors.Wrap(errors.Wrap(
+				err,
+				fmt.Sprintf("frame number: %d", p.FrameNumber),
+			), "verify")
+		}
 	}
 
 	// Prepare challenge for verification
diff --git a/node/execution/intrinsics/global/global_prover_kick.go b/node/execution/intrinsics/global/global_prover_kick.go
index 1498089..57548c8 100644
--- a/node/execution/intrinsics/global/global_prover_kick.go
+++ b/node/execution/intrinsics/global/global_prover_kick.go
@@ -3,6 +3,7 @@ package global
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"math/big"
 	"slices"
 
@@ -398,7 +399,31 @@ func (p *ProverKick) Verify(frameNumber uint64) (bool, error) {
 
 	frame, err := p.clockStore.GetGlobalClockFrame(frameNumber - 1)
 	if err != nil {
-		return false, errors.Wrap(err, "verify")
+		frames, err := p.clockStore.RangeGlobalClockFrameCandidates(
+			frameNumber-1,
+			frameNumber-1,
+		)
+		if err != nil {
+			return false, errors.Wrap(errors.Wrap(
+				err,
+				fmt.Sprintf("frame number: %d", p.FrameNumber),
+			), "verify")
+		}
+		if !frames.First() || !frames.Valid() {
+			frames.Close()
+			return false, errors.Wrap(errors.Wrap(
+				errors.New("not found"),
+				fmt.Sprintf("frame number: %d", p.FrameNumber),
+			), "verify")
+		}
+		frame, err = frames.Value()
+		frames.Close()
+		if err != nil {
+			return false, errors.Wrap(errors.Wrap(
+				err,
+				fmt.Sprintf("frame number: %d", p.FrameNumber),
+			), "verify")
+		}
 	}
 
 	validTraversal, err := p.hypergraph.VerifyTraversalProof(
diff --git a/node/execution/intrinsics/token/token_intrinsic_mint_transaction.go b/node/execution/intrinsics/token/token_intrinsic_mint_transaction.go
index 23e0685..6a6cb55 100644
--- a/node/execution/intrinsics/token/token_intrinsic_mint_transaction.go
+++ b/node/execution/intrinsics/token/token_intrinsic_mint_transaction.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/sha512"
 	"encoding/binary"
+	"fmt"
 	"math/big"
 	"slices"
 
@@ -1746,14 +1747,34 @@ func (i *MintTransactionInput) verifyWithProofOfMeaningfulWork(
 		)
 	}
 
-	frame, err := tx.clockStore.GetGlobalClockFrame(
-		binary.BigEndian.Uint64(tx.Outputs[0].FrameNumber),
-	)
+	frameNumber := binary.BigEndian.Uint64(tx.Outputs[0].FrameNumber)
+	frame, err := tx.clockStore.GetGlobalClockFrame(frameNumber)
 	if err != nil {
-		return errors.Wrap(
-			err,
-			"verify with mint with proof of meaningful work",
+		frames, err := tx.clockStore.RangeGlobalClockFrameCandidates(
+			frameNumber,
+			frameNumber,
 		)
+		if err != nil {
+			return errors.Wrap(errors.Wrap(
+				err,
+				fmt.Sprintf("frame number: %d", frameNumber),
+			), "verify with mint with proof of meaningful work")
+		}
+		if !frames.First() || !frames.Valid() {
+			frames.Close()
+			return errors.Wrap(errors.Wrap(
+				errors.New("not found"),
+				fmt.Sprintf("frame number: %d", frameNumber),
+			), "verify with mint with proof of meaningful work")
+		}
+		frame, err = frames.Value()
+		frames.Close()
+		if err != nil {
+			return errors.Wrap(errors.Wrap(
+				err,
+				fmt.Sprintf("frame number: %d", frameNumber),
+			), "verify with mint with proof of meaningful work")
+		}
 	}
 
 	rewardRoot = frame.Header.ProverTreeCommitment
diff --git 
a/node/execution/intrinsics/token/token_intrinsic_transaction.go b/node/execution/intrinsics/token/token_intrinsic_transaction.go index 601d943..6b2c586 100644 --- a/node/execution/intrinsics/token/token_intrinsic_transaction.go +++ b/node/execution/intrinsics/token/token_intrinsic_transaction.go @@ -24,7 +24,7 @@ import ( const FRAME_2_1_CUTOVER = 244200 const FRAME_2_1_EXTENDED_ENROLL_END = 255840 -const FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END = FRAME_2_1_EXTENDED_ENROLL_END + 1080 +const FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END = FRAME_2_1_EXTENDED_ENROLL_END + 6500 // used to skip frame-based checks, for tests var BEHAVIOR_PASS = false diff --git a/node/p2p/blossomsub.go b/node/p2p/blossomsub.go index 2bdb945..bf98dc0 100644 --- a/node/p2p/blossomsub.go +++ b/node/p2p/blossomsub.go @@ -14,6 +14,7 @@ import ( "net" "runtime/debug" "slices" + "strings" "sync" "sync/atomic" "time" @@ -22,19 +23,18 @@ import ( dht "github.com/libp2p/go-libp2p-kad-dht" libp2pconfig "github.com/libp2p/go-libp2p/config" "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/p2p/discovery/routing" "github.com/libp2p/go-libp2p/p2p/discovery/util" - "github.com/libp2p/go-libp2p/p2p/host/eventbus" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" "github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/libp2p/go-libp2p/p2p/net/gostream" - "github.com/libp2p/go-libp2p/p2p/net/swarm" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" "github.com/mr-tron/base58" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" @@ -43,6 +43,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + grpcpeer "google.golang.org/grpc/peer" "google.golang.org/protobuf/types/known/wrapperspb" "source.quilibrium.com/quilibrium/monorepo/config" blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub" @@ -82,7 +84,7 @@ type BlossomSub struct { peerScoreMx sync.Mutex bootstrap internal.PeerConnector discovery internal.PeerConnector - reachability atomic.Pointer[network.Reachability] + manualReachability atomic.Pointer[bool] p2pConfig config.P2PConfig dht *dht.IpfsDHT } @@ -91,6 +93,7 @@ var _ p2p.PubSub = (*BlossomSub)(nil) var ErrNoPeersAvailable = errors.New("no peers available") var ANNOUNCE_PREFIX = "quilibrium-2.0.2-dusk-" +var connectivityServiceProtocolID = protocol.ID("/quilibrium/connectivity/1.0.0") func getPeerID(p2pConfig *config.P2PConfig) peer.ID { peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey) @@ -150,53 +153,6 @@ func NewBlossomSubWithHost( logger.Info("established peer id", zap.String("peer_id", host.ID().String())) - reachabilitySub, err := host.EventBus().Subscribe( - &event.EvtLocalReachabilityChanged{}, - eventbus.Name("blossomsub"), - ) - if err != nil { - logger.Panic("error subscribing to reachability events", zap.Error(err)) - } - go func() { - defer reachabilitySub.Close() - instance := "node" - if coreId != 0 { - instance = "worker" - } - logger := logger.Named("reachability") - for { - select { - case <-ctx.Done(): - return - case evt, ok := <-reachabilitySub.Out(): - if !ok { - return - } - state := 
evt.(event.EvtLocalReachabilityChanged).Reachability - bs.reachability.Store(&state) - switch state { - case network.ReachabilityPublic: - logger.Info( - instance+" is externally reachable", - zap.Uint("core_id", coreId), - ) - case network.ReachabilityPrivate: - logger.Error( - instance+" is not externally reachable", - zap.Uint("core_id", coreId), - ) - case network.ReachabilityUnknown: - logger.Info( - instance+" reachability is unknown", - zap.Uint("core_id", coreId), - ) - default: - logger.Debug("unknown reachability state", zap.Any("state", state)) - } - } - } - }() - bootstrappers := []peer.AddrInfo{} for _, bh := range bootstrapHosts { // manually construct the p2p string, kind of kludgy, but this is intended @@ -267,6 +223,7 @@ func NewBlossomSubWithHost( ) var tracer *blossomsub.JSONTracer + var err error if p2pConfig.TraceLogStdout { tracer, err = blossomsub.NewStdoutJSONTracer() if err != nil { @@ -356,6 +313,8 @@ func NewBlossomSubWithHost( go bs.background(ctx) + bs.initConnectivityServices(isBootstrapPeer, bootstrappers) + return bs } @@ -516,22 +475,7 @@ func NewBlossomSub( opts = append( opts, - libp2p.SwarmOpts( - swarm.WithUDPBlackHoleSuccessCounter( - &swarm.BlackHoleSuccessCounter{ - N: 8000, - MinSuccesses: 1, - Name: "permissive-udp", - }, - ), - swarm.WithIPv6BlackHoleSuccessCounter( - &swarm.BlackHoleSuccessCounter{ - N: 8000, - MinSuccesses: 1, - Name: "permissive-ip6", - }, - ), - ), + libp2p.SwarmOpts(), ) if p2pConfig.LowWatermarkConnections != -1 && @@ -575,53 +519,6 @@ func NewBlossomSub( logger.Info("established peer id", zap.String("peer_id", h.ID().String())) - reachabilitySub, err := h.EventBus().Subscribe( - &event.EvtLocalReachabilityChanged{}, - eventbus.Name("blossomsub"), - ) - if err != nil { - logger.Panic("error subscribing to reachability events", zap.Error(err)) - } - go func() { - defer reachabilitySub.Close() - instance := "node" - if coreId != 0 { - instance = "worker" - } - logger := logger.Named("reachability") - for { - select { - case <-ctx.Done(): - return - case evt, ok := <-reachabilitySub.Out(): - if !ok { - return - } - state := evt.(event.EvtLocalReachabilityChanged).Reachability - bs.reachability.Store(&state) - switch state { - case network.ReachabilityPublic: - logger.Info( - instance+" is externally reachable", - zap.Uint("core_id", coreId), - ) - case network.ReachabilityPrivate: - logger.Error( - instance+" is not externally reachable", - zap.Uint("core_id", coreId), - ) - case network.ReachabilityUnknown: - logger.Info( - instance+" reachability is unknown", - zap.Uint("core_id", coreId), - ) - default: - logger.Debug("unknown reachability state", zap.Any("state", state)) - } - } - } - }() - kademliaDHT := initDHT( ctx, logger, @@ -857,6 +754,7 @@ func NewBlossomSub( bs.peerID = peerID bs.h = h bs.signKey = privKey + bs.initConnectivityServices(isBootstrapPeer, bootstrappers) go bs.background(ctx) @@ -1241,18 +1139,347 @@ func (b *BlossomSub) IsPeerConnected(peerId []byte) bool { } func (b *BlossomSub) Reachability() *wrapperspb.BoolValue { - reachability := b.reachability.Load() + if manual := b.manualReachability.Load(); manual != nil { + return wrapperspb.Bool(*manual) + } + reachability := b.manualReachability.Load() if reachability == nil { return nil } - switch *reachability { - case network.ReachabilityPublic: - return wrapperspb.Bool(true) - case network.ReachabilityPrivate: - return wrapperspb.Bool(false) - default: + return &wrapperspb.BoolValue{Value: *reachability} +} + +func (b *BlossomSub) 
initConnectivityServices( + isBootstrapPeer bool, + bootstrappers []peer.AddrInfo, +) { + if b.h == nil { + return + } + if isBootstrapPeer { + b.startConnectivityService() + return + } + clone := make([]peer.AddrInfo, len(bootstrappers)) + copy(clone, bootstrappers) + b.blockUntilConnectivityTest(clone) +} + +func (b *BlossomSub) startConnectivityService() { + // Use raw TCP listener on port 8340 + listenAddr := "0.0.0.0:8340" + + listener, err := net.Listen("tcp", listenAddr) + if err != nil { + b.logger.Error("failed to start connectivity service", zap.Error(err)) + return + } + + b.logger.Info("started connectivity service", zap.String("addr", listenAddr)) + + server := grpc.NewServer() + protobufs.RegisterConnectivityServiceServer( + server, + newConnectivityService(b.logger.Named("connectivity-service"), b.h), + ) + + go func() { + if err := server.Serve(listener); err != nil && + !errors.Is(err, net.ErrClosed) { + b.logger.Error("connectivity service exited", zap.Error(err)) + } + }() + + go func() { + <-b.ctx.Done() + server.GracefulStop() + _ = listener.Close() + }() +} + +func (b *BlossomSub) blockUntilConnectivityTest(bootstrappers []peer.AddrInfo) { + if len(bootstrappers) == 0 { + b.logger.Warn("connectivity test skipped, no bootstrap peers available") + return + } + + delay := time.NewTimer(10 * time.Second) + defer delay.Stop() + select { + case <-delay.C: + case <-b.ctx.Done(): + b.logger.Info("connectivity test cancelled before start, context done") + return + } + + backoff := 10 * time.Second + for { + if err := b.runConnectivityTest(b.ctx, bootstrappers); err == nil { + return + } else { + b.logger.Warn("connectivity test failed, retrying", zap.Error(err)) + } + + wait := time.NewTimer(backoff) + select { + case <-wait.C: + wait.Stop() + case <-b.ctx.Done(): + wait.Stop() + b.logger.Info("connectivity test cancelled, context done") + return + } + } +} + +func (b *BlossomSub) runConnectivityTest( + ctx context.Context, + bootstrappers []peer.AddrInfo, +) error { + candidates := make([]peer.AddrInfo, 0, len(bootstrappers)) + for _, info := range bootstrappers { + if info.ID == b.h.ID() { + continue + } + if strings.Contains(info.Addrs[0].String(), "dnsaddr") { + candidates = append(candidates, info) + } + } + if len(candidates) == 0 { + return errors.New("connectivity test: no bootstrap peers available") + } + selection, err := rand.Int(rand.Reader, big.NewInt(int64(len(candidates)))) + if err != nil { + return errors.Wrap(err, "connectivity test peer selection") + } + target := candidates[selection.Int64()] + return b.invokeConnectivityTest(ctx, target) +} + +func (b *BlossomSub) invokeConnectivityTest( + ctx context.Context, + target peer.AddrInfo, +) error { + dialCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + var targetAddr string + for _, addr := range target.Addrs { + host, err := addr.ValueForProtocol(ma.P_IP4) + if err != nil { + host, err = addr.ValueForProtocol(ma.P_IP6) + if err != nil { + host, err = addr.ValueForProtocol(ma.P_DNSADDR) + if err != nil { + continue + } + } + } + + targetAddr = fmt.Sprintf("%s:8340", host) + break + } + + if targetAddr == "" { + b.recordManualReachability(false) + return errors.New( + "connectivity test: no valid address found for bootstrap peer", + ) + } + + b.logger.Debug( + "connecting to bootstrap connectivity service", + zap.String("target", targetAddr), + ) + + conn, err := grpc.NewClient( + targetAddr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + 
b.recordManualReachability(false) + return errors.Wrap(err, "connectivity test dial") + } + defer conn.Close() + + client := protobufs.NewConnectivityServiceClient(conn) + req := &protobufs.ConnectivityTestRequest{ + PeerId: []byte(b.h.ID()), + Multiaddrs: b.collectConnectivityMultiaddrs(), + } + + resp, err := client.TestConnectivity(dialCtx, req) + if err != nil { + b.recordManualReachability(false) + return errors.Wrap(err, "connectivity test rpc") + } + + b.recordManualReachability(resp.GetSuccess()) + if resp.GetSuccess() { + b.logger.Info( + "your node is reachable", + zap.String("bootstrap_peer", target.ID.String()), + ) return nil } + + b.logger.Warn( + "YOUR NODE IS NOT REACHABLE. CHECK YOUR FIREWALL AND PORT FORWARDING CONFIGURATION", + zap.String("bootstrap_peer", target.ID.String()), + zap.String("error", resp.GetErrorMessage()), + ) + if resp.GetErrorMessage() != "" { + return errors.New(resp.GetErrorMessage()) + } + return errors.New("connectivity test failed") +} + +func (b *BlossomSub) collectConnectivityMultiaddrs() []string { + addrs := b.GetOwnMultiaddrs() + out := make([]string, 0, len(addrs)) + for _, addr := range addrs { + out = append(out, addr.String()) + } + return out +} + +func (b *BlossomSub) recordManualReachability(success bool) { + state := new(bool) + *state = success + b.manualReachability.Store(state) +} + +type connectivityService struct { + protobufs.UnimplementedConnectivityServiceServer + logger *zap.Logger + host host.Host + ping *ping.PingService +} + +func newConnectivityService( + logger *zap.Logger, + h host.Host, +) *connectivityService { + return &connectivityService{ + logger: logger, + host: h, + ping: ping.NewPingService(h), + } +} + +func (s *connectivityService) TestConnectivity( + ctx context.Context, + req *protobufs.ConnectivityTestRequest, +) (*protobufs.ConnectivityTestResponse, error) { + resp := &protobufs.ConnectivityTestResponse{} + peerID := peer.ID(req.GetPeerId()) + if peerID == "" { + resp.ErrorMessage = "peer id required" + return resp, nil + } + + // Get the actual IP address from the gRPC peer context + pr, ok := grpcpeer.FromContext(ctx) + if !ok || pr.Addr == nil { + resp.ErrorMessage = "unable to determine peer address from context" + return resp, nil + } + + // Extract the IP from the remote address + remoteAddr := pr.Addr.String() + host, _, err := net.SplitHostPort(remoteAddr) + if err != nil { + resp.ErrorMessage = fmt.Sprintf("invalid remote address: %v", err) + return resp, nil + } + + s.logger.Debug( + "connectivity test from peer", + zap.String("peer_id", peerID.String()), + zap.String("remote_ip", host), + ) + + addrs := make([]ma.Multiaddr, 0, len(req.GetMultiaddrs())) + for _, addrStr := range req.GetMultiaddrs() { + maddr, err := ma.NewMultiaddr(addrStr) + if err != nil { + s.logger.Debug( + "invalid multiaddr in connectivity request", + zap.String("peer_id", peerID.String()), + zap.String("multiaddr", addrStr), + zap.Error(err), + ) + continue + } + + // Extract the port from the multiaddr but use the actual IP from the + // connection + port, err := maddr.ValueForProtocol(ma.P_TCP) + if err != nil { + // If it's not TCP, try UDP + port, err = maddr.ValueForProtocol(ma.P_UDP) + if err != nil { + continue + } + // Build UDP multiaddr with actual IP + newAddr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/udp/%s", host, port)) + if err != nil { + continue + } + addrs = append(addrs, newAddr) + continue + } + + // Build TCP multiaddr with actual IP + newAddr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%s", 
host, port)) + if err != nil { + continue + } + addrs = append(addrs, newAddr) + } + + if len(addrs) == 0 { + resp.ErrorMessage = "no valid multiaddrs to test" + return resp, nil + } + + s.logger.Debug( + "attempting to connect to peer", + zap.String("peer_id", peerID.String()), + zap.Any("addrs", addrs), + ) + + s.host.Peerstore().AddAddrs(peerID, addrs, peerstore.TempAddrTTL) + + connectCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + err = s.host.Connect(connectCtx, peer.AddrInfo{ + ID: peerID, + Addrs: addrs, + }) + if err != nil { + resp.ErrorMessage = err.Error() + return resp, nil + } + + defer s.host.Network().ClosePeer(peerID) + + pingCtx, cancelPing := context.WithTimeout(ctx, 10*time.Second) + defer cancelPing() + + select { + case <-pingCtx.Done(): + resp.ErrorMessage = pingCtx.Err().Error() + return resp, nil + case result := <-s.ping.Ping(pingCtx, peerID): + if result.Error != nil { + resp.ErrorMessage = result.Error.Error() + return resp, nil + } + } + + resp.Success = true + return resp, nil } func initDHT( diff --git a/node/rpc/hypergraph_sync_rpc_server_test.go b/node/rpc/hypergraph_sync_rpc_server_test.go index 1fe35f6..faf3d9d 100644 --- a/node/rpc/hypergraph_sync_rpc_server_test.go +++ b/node/rpc/hypergraph_sync_rpc_server_test.go @@ -5,6 +5,7 @@ import ( "context" "crypto/rand" "crypto/sha512" + "encoding/binary" "fmt" "log" "math/big" @@ -22,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/test/bufconn" @@ -780,11 +782,27 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { enc := verenc.NewMPCitHVerifiableEncryptor(1) inclusionProver := bls48581.NewKZGInclusionProver(logger) - dataTree := buildDataTree(t, enc, inclusionProver) + logDuration := func(step string, start time.Time) { + t.Logf("%s took %s", step, time.Since(start)) + } + + start := time.Now() + dataTrees := make([]*tries.VectorCommitmentTree, 10000) + eg := errgroup.Group{} + eg.SetLimit(10000) + for i := 0; i < 10000; i++ { + eg.Go(func() error { + dataTrees[i] = buildDataTree(t, inclusionProver) + return nil + }) + } + eg.Wait() + logDuration("generated data trees", start) serverPath := filepath.Join(t.TempDir(), "server") clientBase := filepath.Join(t.TempDir(), "clients") + setupStart := time.Now() serverDB := store.NewPebbleDB(logger, &config.DBConfig{Path: serverPath}, 0) defer serverDB.Close() @@ -795,12 +813,14 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { enc, inclusionProver, ) + logDuration("server DB/store initialization", setupStart) - const clientCount = 100 + const clientCount = 8 clientDBs := make([]*store.PebbleDB, clientCount) clientStores := make([]*store.PebbleHypergraphStore, clientCount) clientHGs := make([]*hgcrdt.HypergraphCRDT, clientCount) + serverHypergraphStart := time.Now() serverHG := hgcrdt.NewHypergraph( logger.With(zap.String("side", "server")), serverStore, @@ -809,6 +829,9 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { &tests.Nopthenticator{}, 200, ) + logDuration("server hypergraph initialization", serverHypergraphStart) + + clientSetupStart := time.Now() for i := 0; i < clientCount; i++ { clientPath := filepath.Join(clientBase, fmt.Sprintf("client-%d", i)) clientDBs[i] = store.NewPebbleDB(logger, &config.DBConfig{Path: clientPath}, 0) @@ -828,6 +851,7 @@ func TestHypergraphSyncWithConcurrentCommits(t 
*testing.T) { 200, ) } + logDuration("client hypergraph initialization", clientSetupStart) defer func() { for _, db := range clientDBs { if db != nil { @@ -841,24 +865,28 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { initialVertex := hgcrdt.NewVertex( domain, randomBytes32(t), - dataTree.Commit(inclusionProver, false), - dataTree.GetSize(), + dataTrees[0].Commit(inclusionProver, false), + dataTrees[0].GetSize(), ) + seedStart := time.Now() addVertices( t, serverStore, serverHG, - dataTree, + dataTrees[:1], initialVertex, ) + logDuration("seed server baseline vertex", seedStart) for i := 0; i < clientCount; i++ { + start := time.Now() addVertices( t, clientStores[i], clientHGs[i], - dataTree, + dataTrees[:1], initialVertex, ) + logDuration(fmt.Sprintf("seed client-%d baseline vertex", i), start) } shardKey := application.GetShardKey(initialVertex) @@ -915,16 +943,21 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { const rounds = 3 for round := 0; round < rounds; round++ { - c, _ := serverHG.Commit(uint64(round)) + currentRound := round + roundStart := time.Now() + c, _ := serverHG.Commit(uint64(currentRound)) fmt.Printf("svr commitment: %x\n", c[shardKey][0]) + genStart := time.Now() updates := generateVertices( t, domain, - dataTree, + dataTrees, inclusionProver, - 5, + 15, + 1+(15*currentRound), ) + logDuration(fmt.Sprintf("round %d vertex generation", currentRound), genStart) var syncWG sync.WaitGroup var serverWG sync.WaitGroup @@ -933,8 +966,9 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { serverWG.Add(1) for clientIdx := 0; clientIdx < clientCount; clientIdx++ { - go func(idx int) { + go func(idx int, round int) { defer syncWG.Done() + clientSyncStart := time.Now() clientHG := clientHGs[idx] conn, client := dialClient() streamCtx, cancelStream := context.WithTimeout( @@ -954,13 +988,20 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { c, _ := clientHGs[idx].Commit(uint64(round)) fmt.Printf("cli commitment: %x\n", c[shardKey][0]) - }(clientIdx) + logDuration(fmt.Sprintf("round %d client-%d sync", round, idx), clientSyncStart) + }(clientIdx, currentRound) } go func(round int) { defer serverWG.Done() + serverRoundStart := time.Now() logger.Info("server applying concurrent updates", zap.Int("round", round)) - addVertices(t, serverStore, serverHG, dataTree, updates...) + addVertices(t, serverStore, serverHG, dataTrees[1+(15*round):1+(15*(round+1))], updates...) + logger.Info( + "server applied concurrent updates", + zap.Int("round", round), + zap.Duration("duration", time.Since(serverRoundStart)), + ) logger.Info("server commit starting", zap.Int("round", round)) _, err := serverHG.Commit(uint64(round + 1)) require.NoError(t, err) @@ -969,23 +1010,29 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { syncWG.Wait() serverWG.Wait() + logDuration(fmt.Sprintf("round %d total sync", currentRound), roundStart) } // Add additional server-only updates after the concurrent sync rounds. - extraUpdates := generateVertices(t, domain, dataTree, inclusionProver, 3) - addVertices(t, serverStore, serverHG, dataTree, extraUpdates...) + extraStart := time.Now() + extraUpdates := generateVertices(t, domain, dataTrees, inclusionProver, len(dataTrees)-(1+(15*rounds))-1, 1+(15*rounds)) + addVertices(t, serverStore, serverHG, dataTrees[1+(15*rounds):], extraUpdates...) 
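+	// Index layout for dataTrees: tree 0 seeded the baseline vertex, each
+	// sync round consumed 15 trees starting at 1+(15*round), and the
+	// remaining trees back these extra server-only updates.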
+ logDuration("server extra updates application", extraStart) + commitStart := time.Now() _, err := serverHG.Commit(100) require.NoError(t, err) _, err = serverHG.Commit(101) require.NoError(t, err) + logDuration("server final commits", commitStart) wg := sync.WaitGroup{} - wg.Add(100) + wg.Add(1) serverRoot := serverHG.GetVertexAddsSet(shardKey).GetTree().Commit(false) - for i := 0; i < len(clientHGs); i++ { + for i := 0; i < 1; i++ { go func(idx int) { defer wg.Done() + catchUpStart := time.Now() _, err = clientHGs[idx].Commit(100) require.NoError(t, err) @@ -1011,6 +1058,7 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { require.NoError(t, err) clientRoot := clientHGs[idx].GetVertexAddsSet(shardKey).GetTree().Commit(false) assert.Equal(t, serverRoot, clientRoot, "client should converge to server state") + logDuration(fmt.Sprintf("client-%d final catch-up", idx), catchUpStart) }(i) } wg.Wait() @@ -1018,19 +1066,16 @@ func TestHypergraphSyncWithConcurrentCommits(t *testing.T) { func buildDataTree( t *testing.T, - enc *verenc.MPCitHVerifiableEncryptor, prover *bls48581.KZGInclusionProver, ) *crypto.VectorCommitmentTree { t.Helper() - pub, _, _ := ed448.GenerateKey(rand.Reader) - data := enc.Encrypt(make([]byte, 20), pub) - verenc1 := data[0].Compress() tree := &crypto.VectorCommitmentTree{} - for _, encrypted := range []application.Encrypted{verenc1} { - bytes := encrypted.ToBytes() + b := make([]byte, 20000) + rand.Read(b) + for bytes := range slices.Chunk(b, 64) { id := sha512.Sum512(bytes) - tree.Insert(id[:], bytes, encrypted.GetStatement(), big.NewInt(int64(len(bytes)))) + tree.Insert(id[:], bytes, nil, big.NewInt(int64(len(bytes)))) } tree.Commit(prover, false) return tree @@ -1040,16 +1085,16 @@ func addVertices( t *testing.T, hStore *store.PebbleHypergraphStore, hg *hgcrdt.HypergraphCRDT, - dataTree *crypto.VectorCommitmentTree, + dataTrees []*crypto.VectorCommitmentTree, vertices ...application.Vertex, ) { t.Helper() txn, err := hStore.NewTransaction(false) require.NoError(t, err) - for _, v := range vertices { + for i, v := range vertices { id := v.GetID() - require.NoError(t, hStore.SaveVertexTree(txn, id[:], dataTree)) + require.NoError(t, hStore.SaveVertexTree(txn, id[:], dataTrees[i])) require.NoError(t, hg.AddVertex(txn, v)) } require.NoError(t, txn.Commit()) @@ -1058,20 +1103,22 @@ func addVertices( func generateVertices( t *testing.T, appAddress [32]byte, - dataTree *crypto.VectorCommitmentTree, + dataTrees []*crypto.VectorCommitmentTree, prover *bls48581.KZGInclusionProver, count int, + startingIndex int, ) []application.Vertex { t.Helper() verts := make([]application.Vertex, count) for i := 0; i < count; i++ { addr := randomBytes32(t) + binary.BigEndian.PutUint64(addr[:], uint64(i)) verts[i] = hgcrdt.NewVertex( appAddress, addr, - dataTree.Commit(prover, false), - dataTree.GetSize(), + dataTrees[startingIndex+i].Commit(prover, false), + dataTrees[startingIndex+i].GetSize(), ) } return verts diff --git a/node/rpc/node_rpc_server.go b/node/rpc/node_rpc_server.go index fd8c6cc..ac20001 100644 --- a/node/rpc/node_rpc_server.go +++ b/node/rpc/node_rpc_server.go @@ -220,6 +220,7 @@ func (r *RPCServer) GetNodeInfo( RunningWorkers: uint32(len(workers)), AllocatedWorkers: allocated, PatchNumber: append([]byte{}, config.GetPatchNumber()), + Reachable: r.pubSub.Reachability().Value, }, nil } diff --git a/node/store/hypergraph.go b/node/store/hypergraph.go index 8a0fa0a..5ef10f4 100644 --- a/node/store/hypergraph.go +++ b/node/store/hypergraph.go @@ -405,16 +405,27 @@ 
func (p *PebbleHypergraphStore) NewTransaction(indexed bool) ( return p.db.NewBatch(indexed), nil } -func (p *PebbleHypergraphStore) LoadVertexTree(id []byte) ( - *tries.VectorCommitmentTree, - error, -) { +func (p *PebbleHypergraphStore) LoadVertexTreeRaw(id []byte) ([]byte, error) { vertexData, closer, err := p.db.Get(hypergraphVertexDataKey(id)) if err != nil { return nil, errors.Wrap(err, "load vertex data") } defer closer.Close() + data := make([]byte, len(vertexData)) + copy(data, vertexData) + return data, nil +} + +func (p *PebbleHypergraphStore) LoadVertexTree(id []byte) ( + *tries.VectorCommitmentTree, + error, +) { + vertexData, err := p.LoadVertexTreeRaw(id) + if err != nil { + return nil, err + } + tree, err := tries.DeserializeNonLazyTree(vertexData) if err != nil { return nil, errors.Wrap(err, "load vertex data") @@ -446,6 +457,27 @@ func (p *PebbleHypergraphStore) SaveVertexTree( ) } +func (p *PebbleHypergraphStore) SaveVertexTreeRaw( + txn tries.TreeBackingStoreTransaction, + id []byte, + data []byte, +) error { + if txn == nil { + return errors.Wrap( + errors.New("requires transaction"), + "save vertex tree raw", + ) + } + + buf := make([]byte, len(data)) + copy(buf, data) + + return errors.Wrap( + txn.Set(hypergraphVertexDataKey(id), buf), + "save vertex tree raw", + ) +} + func (p *PebbleHypergraphStore) SetCoveredPrefix(coveredPrefix []int) error { buf := bytes.NewBuffer(nil) prefix := []int64{} @@ -1601,3 +1633,207 @@ func (p *PebbleHypergraphStore) ApplySnapshot( ) return nil } + +// PebbleRawLeafIterator implements tries.RawLeafIterator for direct DB iteration. +type PebbleRawLeafIterator struct { + iter store.Iterator + db *PebbleHypergraphStore + shardKey tries.ShardKey + setType string +} + +var _ tries.RawLeafIterator = (*PebbleRawLeafIterator)(nil) + +func (p *PebbleRawLeafIterator) First() bool { + return p.iter.First() +} + +func (p *PebbleRawLeafIterator) Next() bool { + return p.iter.Next() +} + +func (p *PebbleRawLeafIterator) Valid() bool { + return p.iter.Valid() +} + +func (p *PebbleRawLeafIterator) Close() error { + return p.iter.Close() +} + +func (p *PebbleRawLeafIterator) Leaf() (*tries.RawLeafData, error) { + if !p.iter.Valid() { + return nil, errors.New("iterator not valid") + } + + nodeData := p.iter.Value() + if len(nodeData) == 0 { + return nil, errors.New("empty node data") + } + + // Only process leaf nodes (type byte == TypeLeaf) + if nodeData[0] != tries.TypeLeaf { + return nil, errors.New("not a leaf node") + } + + leaf, err := tries.DeserializeLeafNode(p.db, bytes.NewReader(nodeData[1:])) + if err != nil { + return nil, errors.Wrap(err, "deserialize leaf") + } + + result := &tries.RawLeafData{ + Key: slices.Clone(leaf.Key), + Value: slices.Clone(leaf.Value), + HashTarget: slices.Clone(leaf.HashTarget), + Commitment: slices.Clone(leaf.Commitment), + } + + if leaf.Size != nil { + result.Size = leaf.Size.FillBytes(make([]byte, 32)) + } + + // Load underlying vertex tree data if this is a vertex adds set + if p.setType == string(hypergraph.VertexAtomType) { + data, err := p.db.LoadVertexTreeRaw(leaf.Key) + if err == nil && len(data) > 0 { + result.UnderlyingData = data + } + } + + return result, nil +} + +// IterateRawLeaves returns an iterator over all leaf nodes for a given shard. +// This iterates directly over the database tree node storage, bypassing any +// in-memory tree caching. 
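The First/Next/Valid/Close surface above follows the usual storage-iterator contract, so a consumer loop is mechanical. A minimal sketch, assuming only the tries.RawLeafIterator and tries.RawLeafData definitions in this patch (drainLeaves and its fn callback are illustrative names, not code from this change):

// drainLeaves is a sketch, not part of this patch. It walks a
// RawLeafIterator front to back and hands each decoded leaf to fn.
// The key range spans every tree node in the shard and Leaf rejects
// branch nodes, so decode failures are skipped here; a production
// caller would distinguish "not a leaf node" from a real
// deserialization error rather than dropping both.
func drainLeaves(it tries.RawLeafIterator, fn func(*tries.RawLeafData) error) error {
	defer it.Close()
	for ok := it.First(); ok; ok = it.Next() {
		leaf, err := it.Leaf()
		if err != nil {
			continue
		}
		if err := fn(leaf); err != nil {
			return err
		}
	}
	return nil
}

One piece of arithmetic in the implementation below is worth isolating: IterateRawLeaves derives its exclusive end key by incrementing the shard key as a fixed-width big-endian integer, widening to the next key-type prefix when every byte is saturated. The same rule in isolation (prefixUpperBound is a hypothetical helper, not part of this patch):

// prefixUpperBound is a sketch, not part of this patch. It returns the
// smallest key strictly greater than every key that begins with p, by
// adding one to p as a big-endian integer. ok is false when p is all
// 0xff bytes: no in-prefix bound exists, and the caller must widen the
// range to the next prefix byte instead.
func prefixUpperBound(p []byte) (end []byte, ok bool) {
	end = append([]byte(nil), p...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end, true
		}
		end[i] = 0 // carry into the next more-significant byte
	}
	return nil, false
}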
+func (p *PebbleHypergraphStore) IterateRawLeaves(
+	setType string,
+	phaseType string,
+	shardKey tries.ShardKey,
+) (tries.RawLeafIterator, error) {
+	// Determine the key prefix based on set and phase type
+	var keyPrefix byte
+	switch hypergraph.AtomType(setType) {
+	case hypergraph.VertexAtomType:
+		switch hypergraph.PhaseType(phaseType) {
+		case hypergraph.AddsPhaseType:
+			keyPrefix = VERTEX_ADDS_TREE_NODE
+		case hypergraph.RemovesPhaseType:
+			keyPrefix = VERTEX_REMOVES_TREE_NODE
+		default:
+			return nil, errors.New("unknown phase type")
+		}
+	case hypergraph.HyperedgeAtomType:
+		switch hypergraph.PhaseType(phaseType) {
+		case hypergraph.AddsPhaseType:
+			keyPrefix = HYPEREDGE_ADDS_TREE_NODE
+		case hypergraph.RemovesPhaseType:
+			keyPrefix = HYPEREDGE_REMOVES_TREE_NODE
+		default:
+			return nil, errors.New("unknown phase type")
+		}
+	default:
+		return nil, errors.New("unknown set type")
+	}
+
+	// Build the key range for this shard's tree nodes
+	startKey := []byte{HYPERGRAPH_SHARD, keyPrefix}
+	startKey = append(startKey, shardKey.L1[:]...)
+	startKey = append(startKey, shardKey.L2[:]...)
+
+	// End key is the next shard (increment L2 by 1 for upper bound)
+	endKey := []byte{HYPERGRAPH_SHARD, keyPrefix}
+	endKey = append(endKey, shardKey.L1[:]...)
+	// Use L2 + 1 as upper bound, handling overflow
+	l2End := new(big.Int).SetBytes(shardKey.L2[:])
+	l2End.Add(l2End, big.NewInt(1))
+
+	// Check if L2 overflowed (would need more than 32 bytes)
+	if l2End.BitLen() > 256 {
+		// L2 overflow: increment L1 and set L2 to zero
+		l1End := [3]byte{shardKey.L1[0], shardKey.L1[1], shardKey.L1[2]}
+		carry := byte(1)
+		for i := 2; i >= 0; i-- {
+			sum := uint16(l1End[i]) + uint16(carry)
+			l1End[i] = byte(sum)
+			carry = byte(sum >> 8)
+		}
+		// If L1 also overflowed (carry is still 1), use max key
+		if carry == 1 {
+			// Both L1 and L2 at max - use prefix+1 as end key (next key prefix)
+			endKey = []byte{HYPERGRAPH_SHARD, keyPrefix + 1}
+		} else {
+			endKey = append(endKey[:2], l1End[:]...)
+			endKey = append(endKey, make([]byte, 32)...) // L2 = all zeros
+		}
+	} else {
+		l2EndBytes := l2End.FillBytes(make([]byte, 32))
+		endKey = append(endKey, l2EndBytes...)
+	}
+
+	iter, err := p.db.NewIter(startKey, endKey)
+	if err != nil {
+		return nil, errors.Wrap(err, "create raw leaf iterator")
+	}
+
+	return &PebbleRawLeafIterator{
+		iter:     iter,
+		db:       p,
+		shardKey: shardKey,
+		setType:  setType,
+	}, nil
+}
+
+// InsertRawLeaf inserts a leaf node directly into the database without tree
+// traversal. This is used during raw sync to efficiently insert many leaves
+// without the overhead of maintaining tree structure.
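The intended call pattern batches many such inserts into one transaction, matching the NewTransaction/Commit flow used elsewhere in this patch. A sketch under that assumption (applyRawLeaves is an illustrative name, and cleanup of the abandoned transaction on error is elided because this change does not show that API):

// applyRawLeaves is a sketch, not part of this patch. It writes a batch
// of leaves received during raw sync in a single transaction, so the
// shard either gains the whole batch or none of it. setType and
// phaseType select the target tree, exactly as in IterateRawLeaves.
func applyRawLeaves(
	hStore *store.PebbleHypergraphStore,
	setType, phaseType string,
	shardKey tries.ShardKey,
	leaves []*tries.RawLeafData,
) error {
	txn, err := hStore.NewTransaction(false)
	if err != nil {
		return err
	}
	for _, leaf := range leaves {
		if err := hStore.InsertRawLeaf(
			txn, setType, phaseType, shardKey, leaf,
		); err != nil {
			return err
		}
	}
	return txn.Commit()
}

The implementation itself: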
+func (p *PebbleHypergraphStore) InsertRawLeaf( + txn tries.TreeBackingStoreTransaction, + setType string, + phaseType string, + shardKey tries.ShardKey, + leaf *tries.RawLeafData, +) error { + if leaf == nil || len(leaf.Key) == 0 { + return errors.New("invalid leaf data") + } + + // Reconstruct the leaf node + leafNode := &tries.LazyVectorCommitmentLeafNode{ + Key: leaf.Key, + Value: leaf.Value, + HashTarget: leaf.HashTarget, + Commitment: leaf.Commitment, + Store: p, + } + + if len(leaf.Size) > 0 { + leafNode.Size = new(big.Int).SetBytes(leaf.Size) + } else { + leafNode.Size = big.NewInt(0) + } + + // Get the full path for this leaf key + path := tries.GetFullPath(leaf.Key) + + // Insert the node directly + if err := p.InsertNode( + txn, + setType, + phaseType, + shardKey, + leaf.Key, + path, + leafNode, + ); err != nil { + return errors.Wrap(err, "insert raw leaf") + } + + // If there's underlying vertex tree data, save it too + if len(leaf.UnderlyingData) > 0 { + if err := p.SaveVertexTreeRaw(txn, leaf.Key, leaf.UnderlyingData); err != nil { + return errors.Wrap(err, "insert raw leaf: save vertex tree") + } + } + + return nil +} diff --git a/node/store/pebble.go b/node/store/pebble.go index 27adc75..362c90c 100644 --- a/node/store/pebble.go +++ b/node/store/pebble.go @@ -42,6 +42,9 @@ var pebbleMigrations = []func(*pebble.Batch) error{ migration_2_1_0_146, migration_2_1_0_147, migration_2_1_0_148, + migration_2_1_0_149, + migration_2_1_0_1410, + migration_2_1_0_1411, } func NewPebbleDB( @@ -520,6 +523,18 @@ func migration_2_1_0_148(b *pebble.Batch) error { return migration_2_1_0_14(b) } +func migration_2_1_0_149(b *pebble.Batch) error { + return nil +} + +func migration_2_1_0_1410(b *pebble.Batch) error { + return migration_2_1_0_149(b) +} + +func migration_2_1_0_1411(b *pebble.Batch) error { + return migration_2_1_0_149(b) +} + type pebbleSnapshotDB struct { snap *pebble.Snapshot } diff --git a/node/tests/simnet.go b/node/tests/simnet.go index 5bfa4ae..b8ab2fe 100644 --- a/node/tests/simnet.go +++ b/node/tests/simnet.go @@ -8,7 +8,6 @@ import ( "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/net/swarm" simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p" "github.com/marcopolo/simnet" "github.com/stretchr/testify/require" @@ -42,22 +41,7 @@ func GenerateSimnetHosts(t *testing.T, count int, opts []libp2p.Option) ( simlibp2p.NetworkSettings{ OptsForHostIdx: func(idx int) []libp2p.Option { return []libp2p.Option{ - libp2p.SwarmOpts( - swarm.WithUDPBlackHoleSuccessCounter( - &swarm.BlackHoleSuccessCounter{ - N: 8000, - MinSuccesses: 1, - Name: "permissive-udp", - }, - ), - swarm.WithIPv6BlackHoleSuccessCounter( - &swarm.BlackHoleSuccessCounter{ - N: 8000, - MinSuccesses: 1, - Name: "permissive-ip6", - }, - ), - ), + libp2p.SwarmOpts(), } }, }, diff --git a/protobufs/node.pb.go b/protobufs/node.pb.go index 8061a27..6d0ba4d 100644 --- a/protobufs/node.pb.go +++ b/protobufs/node.pb.go @@ -392,6 +392,7 @@ type NodeInfoResponse struct { PatchNumber []byte `protobuf:"bytes,7,opt,name=patch_number,json=patchNumber,proto3" json:"patch_number,omitempty"` LastReceivedFrame uint64 `protobuf:"varint,8,opt,name=last_received_frame,json=lastReceivedFrame,proto3" json:"last_received_frame,omitempty"` LastGlobalHeadFrame uint64 `protobuf:"varint,9,opt,name=last_global_head_frame,json=lastGlobalHeadFrame,proto3" json:"last_global_head_frame,omitempty"` + Reachable bool 
`protobuf:"varint,10,opt,name=reachable,proto3" json:"reachable,omitempty"` } func (x *NodeInfoResponse) Reset() { @@ -489,6 +490,13 @@ func (x *NodeInfoResponse) GetLastGlobalHeadFrame() uint64 { return 0 } +func (x *NodeInfoResponse) GetReachable() bool { + if x != nil { + return x.Reachable + } + return false +} + type WorkerInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1739,6 +1747,116 @@ func (x *GetTokensByAccountResponse) GetPendingTransactions() []*MaterializedPen return nil } +type ConnectivityTestRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` + Multiaddrs []string `protobuf:"bytes,2,rep,name=multiaddrs,proto3" json:"multiaddrs,omitempty"` +} + +func (x *ConnectivityTestRequest) Reset() { + *x = ConnectivityTestRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_node_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectivityTestRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectivityTestRequest) ProtoMessage() {} + +func (x *ConnectivityTestRequest) ProtoReflect() protoreflect.Message { + mi := &file_node_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectivityTestRequest.ProtoReflect.Descriptor instead. +func (*ConnectivityTestRequest) Descriptor() ([]byte, []int) { + return file_node_proto_rawDescGZIP(), []int{25} +} + +func (x *ConnectivityTestRequest) GetPeerId() []byte { + if x != nil { + return x.PeerId + } + return nil +} + +func (x *ConnectivityTestRequest) GetMultiaddrs() []string { + if x != nil { + return x.Multiaddrs + } + return nil +} + +type ConnectivityTestResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ConnectivityTestResponse) Reset() { + *x = ConnectivityTestResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_node_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectivityTestResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectivityTestResponse) ProtoMessage() {} + +func (x *ConnectivityTestResponse) ProtoReflect() protoreflect.Message { + mi := &file_node_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectivityTestResponse.ProtoReflect.Descriptor instead. 
+func (*ConnectivityTestResponse) Descriptor() ([]byte, []int) { + return file_node_proto_rawDescGZIP(), []int{26} +} + +func (x *ConnectivityTestResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *ConnectivityTestResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + type RespawnRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1750,7 +1868,7 @@ type RespawnRequest struct { func (x *RespawnRequest) Reset() { *x = RespawnRequest{} if protoimpl.UnsafeEnabled { - mi := &file_node_proto_msgTypes[25] + mi := &file_node_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1763,7 +1881,7 @@ func (x *RespawnRequest) String() string { func (*RespawnRequest) ProtoMessage() {} func (x *RespawnRequest) ProtoReflect() protoreflect.Message { - mi := &file_node_proto_msgTypes[25] + mi := &file_node_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1776,7 +1894,7 @@ func (x *RespawnRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RespawnRequest.ProtoReflect.Descriptor instead. func (*RespawnRequest) Descriptor() ([]byte, []int) { - return file_node_proto_rawDescGZIP(), []int{25} + return file_node_proto_rawDescGZIP(), []int{27} } func (x *RespawnRequest) GetFilter() []byte { @@ -1795,7 +1913,7 @@ type RespawnResponse struct { func (x *RespawnResponse) Reset() { *x = RespawnResponse{} if protoimpl.UnsafeEnabled { - mi := &file_node_proto_msgTypes[26] + mi := &file_node_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1808,7 +1926,7 @@ func (x *RespawnResponse) String() string { func (*RespawnResponse) ProtoMessage() {} func (x *RespawnResponse) ProtoReflect() protoreflect.Message { - mi := &file_node_proto_msgTypes[26] + mi := &file_node_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1821,7 +1939,7 @@ func (x *RespawnResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RespawnResponse.ProtoReflect.Descriptor instead. func (*RespawnResponse) Descriptor() ([]byte, []int) { - return file_node_proto_rawDescGZIP(), []int{26} + return file_node_proto_rawDescGZIP(), []int{28} } type CreateJoinProofRequest struct { @@ -1838,7 +1956,7 @@ type CreateJoinProofRequest struct { func (x *CreateJoinProofRequest) Reset() { *x = CreateJoinProofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_node_proto_msgTypes[27] + mi := &file_node_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1851,7 +1969,7 @@ func (x *CreateJoinProofRequest) String() string { func (*CreateJoinProofRequest) ProtoMessage() {} func (x *CreateJoinProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_node_proto_msgTypes[27] + mi := &file_node_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1864,7 +1982,7 @@ func (x *CreateJoinProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateJoinProofRequest.ProtoReflect.Descriptor instead. 
func (*CreateJoinProofRequest) Descriptor() ([]byte, []int) { - return file_node_proto_rawDescGZIP(), []int{27} + return file_node_proto_rawDescGZIP(), []int{29} } func (x *CreateJoinProofRequest) GetChallenge() []byte { @@ -1906,7 +2024,7 @@ type CreateJoinProofResponse struct { func (x *CreateJoinProofResponse) Reset() { *x = CreateJoinProofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_node_proto_msgTypes[28] + mi := &file_node_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1919,7 +2037,7 @@ func (x *CreateJoinProofResponse) String() string { func (*CreateJoinProofResponse) ProtoMessage() {} func (x *CreateJoinProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_node_proto_msgTypes[28] + mi := &file_node_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1932,7 +2050,7 @@ func (x *CreateJoinProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateJoinProofResponse.ProtoReflect.Descriptor instead. func (*CreateJoinProofResponse) Descriptor() ([]byte, []int) { - return file_node_proto_rawDescGZIP(), []int{28} + return file_node_proto_rawDescGZIP(), []int{30} } func (x *CreateJoinProofResponse) GetResponse() []byte { @@ -1996,7 +2114,7 @@ var file_node_proto_rawDesc = []byte{ 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, - 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xe9, 0x02, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x87, 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, @@ -2019,105 +2137,132 @@ var file_node_proto_rawDesc = []byte{ 0x61, 0x73, 0x74, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x48, 0x65, 0x61, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x22, 0x8f, 0x01, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x61, 0x76, 0x61, - 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x22, 0x5a, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x44, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x6e, - 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2f, 0x0a, 0x13, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2f, 0x0a, - 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2f, - 0x0a, 0x09, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x72, - 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, - 0x41, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x6b, 0x65, - 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, - 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b, 0x65, - 0x79, 0x73, 0x22, 0x4f, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, 0x6c, - 0x69, 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, - 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xdd, 0x01, 0x0a, 0x0b, 0x53, 0x65, - 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x12, 0x42, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 
0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x61, - 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, - 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, - 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, - 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x5a, 0x0a, 0x0c, 0x53, 0x65, 0x6e, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, - 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, - 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, - 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x30, 0x0a, 0x14, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x68, 0x0a, 0x0f, 0x49, 0x6d, 0x70, 0x6c, 0x69, - 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6d, - 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x22, 0xce, 0x01, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, - 0x12, 0x5e, 0x0a, 0x12, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, - 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x48, 0x00, 0x52, 0x11, 0x6f, - 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x55, 0x0a, 0x10, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, + 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x8f, + 0x01, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, + 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x22, 0x5a, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x6e, 0x0a, 0x0a, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x13, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2f, 0x0a, 0x09, + 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x41, 0x0a, + 0x07, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, + 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, + 0x22, 0x4f, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, + 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x54, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, + 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x73, 0x22, 0xdd, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x42, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x64, + 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, + 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, + 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x5a, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, + 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, + 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, + 0x61, 0x74, 0x61, 0x22, 0x30, 0x0a, 0x14, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x68, 0x0a, 0x0f, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6c, + 0x69, 0x63, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0c, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, + 0xce, 0x01, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x5e, + 0x0a, 0x12, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x09, 
0x0a, 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x04, 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, - 0x72, 0x22, 0x7c, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x12, - 0x31, 0x0a, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x04, 0x63, 0x6f, - 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, - 0x88, 0x03, 0x0a, 0x17, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x55, + 0x0a, 0x10, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x70, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x22, 0x7d, 0x0a, 0x04, 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, + 0x7c, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x31, 0x0a, + 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 
0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x04, 0x63, 0x6f, 0x69, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x88, 0x03, + 0x0a, 0x17, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x6f, 0x6e, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6f, + 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x69, 0x6e, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x31, 0x0a, 0x14, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x38, + 0x0a, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x16, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x8c, 0x06, 0x0a, 0x1e, 0x4d, 0x61, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, @@ -2126,163 +2271,158 @@ var file_node_proto_rawDesc = []byte{ 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x6f, 0x6e, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0a, 0x6f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x76, - 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, - 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x31, 0x0a, - 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x61, 0x64, 0x64, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, + 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x6f, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, + 0x12, 0x2d, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x72, + 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, + 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x6f, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, + 0x36, 0x0a, 0x17, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x15, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, + 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0d, 0x74, 0x6f, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x72, 0x65, + 0x66, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x17, 0x0a, 0x07, 0x74, 0x6f, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x06, 0x74, 0x6f, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x75, + 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, + 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x6f, 0x5f, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 
0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x6f, 0x41, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, + 0x12, 0x3e, 0x0a, 0x1b, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x38, 0x0a, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x16, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x8c, 0x06, 0x0a, 0x1e, 0x4d, - 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, - 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, - 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0f, 0x74, - 0x6f, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x6f, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, - 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6f, 0x6e, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x10, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, - 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, - 0x74, 0x6f, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x15, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, - 0x63, 
0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x6f, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x69, 0x6e, - 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, - 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6f, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, - 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0a, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x74, - 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x6f, - 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, - 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x12, 0x45, 0x0a, 0x1f, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x72, 0x65, 0x66, - 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4d, 0x0a, 0x19, 0x47, 0x65, 0x74, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xa6, 0x02, 0x0a, 0x1a, 0x47, 0x65, 0x74, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 
0x70, 0x62, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, - 0x69, 0x6e, 0x52, 0x0b, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x73, 0x12, - 0x54, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, - 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, - 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6a, 0x0a, 0x14, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x12, 0x45, 0x0a, 0x1f, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x72, 0x65, 0x66, 0x75, 0x6e, + 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4d, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xa6, 0x02, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, + 0x63, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, + 0x52, 0x0b, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x54, 0x0a, + 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, - 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x70, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x28, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 
0x65, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x52, - 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, - 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, - 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, - 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, - 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, - 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, - 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x35, 0x0a, 0x17, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x6a, 0x0a, 0x14, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x52, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, + 0x65, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, + 0x64, 0x72, 0x73, 0x22, 0x59, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x28, + 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x70, + 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 
0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x32, 0x9c, 0x04, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, - 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x6b, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, - 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, - 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x71, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, + 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, + 0x65, 0x6e, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x03, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x72, + 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x35, 0x0a, 0x17, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 
0x9c, 0x04, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, - 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, - 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x32, 0xe4, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, - 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, - 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, - 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, - 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4e, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 
0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x04, 0x53, + 0x65, 0x6e, 0x64, 0x12, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, + 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x7d, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, + 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, + 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0x8e, 0x01, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x10, 0x54, 0x65, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x30, 0x2e, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 0xe4, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x12, 0x27, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, - 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 
0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, - 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x74, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, + 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, + 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2297,7 +2437,7 @@ func file_node_proto_rawDescGZIP() []byte { return file_node_proto_rawDescData } -var file_node_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_node_proto_msgTypes = make([]protoimpl.MessageInfo, 31) var file_node_proto_goTypes = []interface{}{ (*GetPeerInfoRequest)(nil), // 0: quilibrium.node.node.pb.GetPeerInfoRequest (*GetNodeInfoRequest)(nil), // 1: quilibrium.node.node.pb.GetNodeInfoRequest @@ -2324,12 +2464,14 @@ var file_node_proto_goTypes = []interface{}{ (*MaterializedPendingTransaction)(nil), // 22: quilibrium.node.node.pb.MaterializedPendingTransaction (*GetTokensByAccountRequest)(nil), // 23: quilibrium.node.node.pb.GetTokensByAccountRequest (*GetTokensByAccountResponse)(nil), // 24: quilibrium.node.node.pb.GetTokensByAccountResponse - (*RespawnRequest)(nil), // 25: quilibrium.node.node.pb.RespawnRequest - (*RespawnResponse)(nil), // 26: quilibrium.node.node.pb.RespawnResponse - (*CreateJoinProofRequest)(nil), // 27: quilibrium.node.node.pb.CreateJoinProofRequest - (*CreateJoinProofResponse)(nil), // 28: quilibrium.node.node.pb.CreateJoinProofResponse - (*InboxMessage)(nil), // 29: quilibrium.node.channel.pb.InboxMessage - (*MessageBundle)(nil), // 30: quilibrium.node.global.pb.MessageBundle + (*ConnectivityTestRequest)(nil), // 25: quilibrium.node.node.pb.ConnectivityTestRequest + (*ConnectivityTestResponse)(nil), // 26: quilibrium.node.node.pb.ConnectivityTestResponse + (*RespawnRequest)(nil), // 27: quilibrium.node.node.pb.RespawnRequest + (*RespawnResponse)(nil), // 28: quilibrium.node.node.pb.RespawnResponse + (*CreateJoinProofRequest)(nil), // 29: quilibrium.node.node.pb.CreateJoinProofRequest + (*CreateJoinProofResponse)(nil), // 30: quilibrium.node.node.pb.CreateJoinProofResponse + (*InboxMessage)(nil), // 31: quilibrium.node.channel.pb.InboxMessage + (*MessageBundle)(nil), // 32: quilibrium.node.global.pb.MessageBundle } var file_node_proto_depIdxs = []int32{ 3, // 0: quilibrium.node.node.pb.PeerInfo.reachability:type_name -> quilibrium.node.node.pb.Reachability @@ -2337,8 +2479,8 @@ var 
file_node_proto_depIdxs = []int32{ 4, // 2: quilibrium.node.node.pb.PeerInfoResponse.peer_info:type_name -> quilibrium.node.node.pb.PeerInfo 7, // 3: quilibrium.node.node.pb.WorkerInfoResponse.worker_info:type_name -> quilibrium.node.node.pb.WorkerInfo 10, // 4: quilibrium.node.node.pb.KeyRing.keys:type_name -> quilibrium.node.node.pb.InlineKey - 29, // 5: quilibrium.node.node.pb.DeliveryData.messages:type_name -> quilibrium.node.channel.pb.InboxMessage - 30, // 6: quilibrium.node.node.pb.SendRequest.request:type_name -> quilibrium.node.global.pb.MessageBundle + 31, // 5: quilibrium.node.node.pb.DeliveryData.messages:type_name -> quilibrium.node.channel.pb.InboxMessage + 32, // 6: quilibrium.node.node.pb.SendRequest.request:type_name -> quilibrium.node.global.pb.MessageBundle 13, // 7: quilibrium.node.node.pb.SendRequest.delivery_data:type_name -> quilibrium.node.node.pb.DeliveryData 13, // 8: quilibrium.node.node.pb.SendResponse.delivery_data:type_name -> quilibrium.node.node.pb.DeliveryData 16, // 9: quilibrium.node.node.pb.AccountRef.originated_account:type_name -> quilibrium.node.node.pb.OriginatedAccountRef @@ -2353,17 +2495,19 @@ var file_node_proto_depIdxs = []int32{ 2, // 18: quilibrium.node.node.pb.NodeService.GetWorkerInfo:input_type -> quilibrium.node.node.pb.GetWorkerInfoRequest 14, // 19: quilibrium.node.node.pb.NodeService.Send:input_type -> quilibrium.node.node.pb.SendRequest 23, // 20: quilibrium.node.node.pb.NodeService.GetTokensByAccount:input_type -> quilibrium.node.node.pb.GetTokensByAccountRequest - 25, // 21: quilibrium.node.node.pb.DataIPCService.Respawn:input_type -> quilibrium.node.node.pb.RespawnRequest - 27, // 22: quilibrium.node.node.pb.DataIPCService.CreateJoinProof:input_type -> quilibrium.node.node.pb.CreateJoinProofRequest - 5, // 23: quilibrium.node.node.pb.NodeService.GetPeerInfo:output_type -> quilibrium.node.node.pb.PeerInfoResponse - 6, // 24: quilibrium.node.node.pb.NodeService.GetNodeInfo:output_type -> quilibrium.node.node.pb.NodeInfoResponse - 8, // 25: quilibrium.node.node.pb.NodeService.GetWorkerInfo:output_type -> quilibrium.node.node.pb.WorkerInfoResponse - 15, // 26: quilibrium.node.node.pb.NodeService.Send:output_type -> quilibrium.node.node.pb.SendResponse - 24, // 27: quilibrium.node.node.pb.NodeService.GetTokensByAccount:output_type -> quilibrium.node.node.pb.GetTokensByAccountResponse - 26, // 28: quilibrium.node.node.pb.DataIPCService.Respawn:output_type -> quilibrium.node.node.pb.RespawnResponse - 28, // 29: quilibrium.node.node.pb.DataIPCService.CreateJoinProof:output_type -> quilibrium.node.node.pb.CreateJoinProofResponse - 23, // [23:30] is the sub-list for method output_type - 16, // [16:23] is the sub-list for method input_type + 25, // 21: quilibrium.node.node.pb.ConnectivityService.TestConnectivity:input_type -> quilibrium.node.node.pb.ConnectivityTestRequest + 27, // 22: quilibrium.node.node.pb.DataIPCService.Respawn:input_type -> quilibrium.node.node.pb.RespawnRequest + 29, // 23: quilibrium.node.node.pb.DataIPCService.CreateJoinProof:input_type -> quilibrium.node.node.pb.CreateJoinProofRequest + 5, // 24: quilibrium.node.node.pb.NodeService.GetPeerInfo:output_type -> quilibrium.node.node.pb.PeerInfoResponse + 6, // 25: quilibrium.node.node.pb.NodeService.GetNodeInfo:output_type -> quilibrium.node.node.pb.NodeInfoResponse + 8, // 26: quilibrium.node.node.pb.NodeService.GetWorkerInfo:output_type -> quilibrium.node.node.pb.WorkerInfoResponse + 15, // 27: quilibrium.node.node.pb.NodeService.Send:output_type -> 
quilibrium.node.node.pb.SendResponse + 24, // 28: quilibrium.node.node.pb.NodeService.GetTokensByAccount:output_type -> quilibrium.node.node.pb.GetTokensByAccountResponse + 26, // 29: quilibrium.node.node.pb.ConnectivityService.TestConnectivity:output_type -> quilibrium.node.node.pb.ConnectivityTestResponse + 28, // 30: quilibrium.node.node.pb.DataIPCService.Respawn:output_type -> quilibrium.node.node.pb.RespawnResponse + 30, // 31: quilibrium.node.node.pb.DataIPCService.CreateJoinProof:output_type -> quilibrium.node.node.pb.CreateJoinProofResponse + 24, // [24:32] is the sub-list for method output_type + 16, // [16:24] is the sub-list for method input_type 16, // [16:16] is the sub-list for extension type_name 16, // [16:16] is the sub-list for extension extendee 0, // [0:16] is the sub-list for field type_name @@ -2678,7 +2822,7 @@ func file_node_proto_init() { } } file_node_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespawnRequest); i { + switch v := v.(*ConnectivityTestRequest); i { case 0: return &v.state case 1: @@ -2690,7 +2834,7 @@ func file_node_proto_init() { } } file_node_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespawnResponse); i { + switch v := v.(*ConnectivityTestResponse); i { case 0: return &v.state case 1: @@ -2702,7 +2846,7 @@ func file_node_proto_init() { } } file_node_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateJoinProofRequest); i { + switch v := v.(*RespawnRequest); i { case 0: return &v.state case 1: @@ -2714,6 +2858,30 @@ func file_node_proto_init() { } } file_node_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RespawnResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_node_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateJoinProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_node_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateJoinProofResponse); i { case 0: return &v.state @@ -2736,9 +2904,9 @@ func file_node_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_node_proto_rawDesc, NumEnums: 0, - NumMessages: 29, + NumMessages: 31, NumExtensions: 0, - NumServices: 2, + NumServices: 3, }, GoTypes: file_node_proto_goTypes, DependencyIndexes: file_node_proto_depIdxs, diff --git a/protobufs/node.pb.gw.go b/protobufs/node.pb.gw.go index a7e24a7..61fecda 100644 --- a/protobufs/node.pb.gw.go +++ b/protobufs/node.pb.gw.go @@ -201,6 +201,40 @@ func local_request_NodeService_GetTokensByAccount_0(ctx context.Context, marshal } +func request_ConnectivityService_TestConnectivity_0(ctx context.Context, marshaler runtime.Marshaler, client ConnectivityServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ConnectivityTestRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := 
client.TestConnectivity(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ConnectivityService_TestConnectivity_0(ctx context.Context, marshaler runtime.Marshaler, server ConnectivityServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ConnectivityTestRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.TestConnectivity(ctx, &protoReq) + return msg, metadata, err + +} + func request_DataIPCService_Respawn_0(ctx context.Context, marshaler runtime.Marshaler, client DataIPCServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RespawnRequest var metadata runtime.ServerMetadata @@ -403,6 +437,40 @@ func RegisterNodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux return nil } +// RegisterConnectivityServiceHandlerServer registers the http handlers for service ConnectivityService to "mux". +// UnaryRPC :call ConnectivityServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterConnectivityServiceHandlerFromEndpoint instead. +func RegisterConnectivityServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ConnectivityServiceServer) error { + + mux.Handle("POST", pattern_ConnectivityService_TestConnectivity_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/quilibrium.node.node.pb.ConnectivityService/TestConnectivity", runtime.WithHTTPPathPattern("/quilibrium.node.node.pb.ConnectivityService/TestConnectivity")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ConnectivityService_TestConnectivity_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ConnectivityService_TestConnectivity_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + // RegisterDataIPCServiceHandlerServer registers the http handlers for service DataIPCService to "mux". // UnaryRPC :call DataIPCServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
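The hunks above and below wire the new ConnectivityService into the grpc-gateway mux. For reference, here is a minimal client-side sketch (illustrative only, not part of the patch) of how the generated bindings might be exercised once compiled. The endpoint address, timeout, peer ID bytes, and probe multiaddr are assumptions; only the package path, service, message, and field names come from the node.proto changes in this patch.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)

func main() {
	// Assumed node gRPC endpoint; the patch itself does not fix an address.
	conn, err := grpc.Dial(
		"127.0.0.1:8337",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := protobufs.NewConnectivityServiceClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// ConnectivityTestRequest carries raw peer ID bytes plus candidate
	// multiaddrs to probe; both values here are placeholders.
	resp, err := client.TestConnectivity(ctx, &protobufs.ConnectivityTestRequest{
		PeerId:     []byte{ /* peer ID bytes */ },
		Multiaddrs: []string{"/ip4/203.0.113.7/udp/8336/quic-v1"},
	})
	if err != nil {
		log.Fatalf("TestConnectivity: %v", err)
	}
	if !resp.Success {
		log.Printf("peer unreachable: %s", resp.ErrorMessage)
	}
}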
@@ -637,6 +705,77 @@ var ( forward_NodeService_GetTokensByAccount_0 = runtime.ForwardResponseMessage ) +// RegisterConnectivityServiceHandlerFromEndpoint is same as RegisterConnectivityServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterConnectivityServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterConnectivityServiceHandler(ctx, mux, conn) +} + +// RegisterConnectivityServiceHandler registers the http handlers for service ConnectivityService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterConnectivityServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterConnectivityServiceHandlerClient(ctx, mux, NewConnectivityServiceClient(conn)) +} + +// RegisterConnectivityServiceHandlerClient registers the http handlers for service ConnectivityService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ConnectivityServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ConnectivityServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ConnectivityServiceClient" to call the correct interceptors. +func RegisterConnectivityServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ConnectivityServiceClient) error { + + mux.Handle("POST", pattern_ConnectivityService_TestConnectivity_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/quilibrium.node.node.pb.ConnectivityService/TestConnectivity", runtime.WithHTTPPathPattern("/quilibrium.node.node.pb.ConnectivityService/TestConnectivity")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ConnectivityService_TestConnectivity_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ConnectivityService_TestConnectivity_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_ConnectivityService_TestConnectivity_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.node.pb.ConnectivityService", "TestConnectivity"}, "")) +) + +var ( + forward_ConnectivityService_TestConnectivity_0 = runtime.ForwardResponseMessage +) + // RegisterDataIPCServiceHandlerFromEndpoint is same as RegisterDataIPCServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterDataIPCServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { diff --git a/protobufs/node.proto b/protobufs/node.proto index 1b7bad5..02a7ce3 100644 --- a/protobufs/node.proto +++ b/protobufs/node.proto @@ -61,6 +61,7 @@ message NodeInfoResponse { bytes patch_number = 7; uint64 last_received_frame = 8; uint64 last_global_head_frame = 9; + bool reachable = 10; } message WorkerInfo { @@ -216,6 +217,21 @@ service NodeService { returns (GetTokensByAccountResponse); } +message ConnectivityTestRequest { + bytes peer_id = 1; + repeated string multiaddrs = 2; +} + +message ConnectivityTestResponse { + bool success = 1; + string error_message = 2; +} + +service ConnectivityService { + rpc TestConnectivity(ConnectivityTestRequest) + returns (ConnectivityTestResponse); +} + message RespawnRequest { bytes filter = 1; } diff --git a/protobufs/node_grpc.pb.go b/protobufs/node_grpc.pb.go index fb5dcf7..9718de9 100644 --- a/protobufs/node_grpc.pb.go +++ b/protobufs/node_grpc.pb.go @@ -256,6 +256,96 @@ var NodeService_ServiceDesc = grpc.ServiceDesc{ Metadata: "node.proto", } +const ( + ConnectivityService_TestConnectivity_FullMethodName = "/quilibrium.node.node.pb.ConnectivityService/TestConnectivity" +) + +// ConnectivityServiceClient is the client API for ConnectivityService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ConnectivityServiceClient interface { + TestConnectivity(ctx context.Context, in *ConnectivityTestRequest, opts ...grpc.CallOption) (*ConnectivityTestResponse, error) +} + +type connectivityServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewConnectivityServiceClient(cc grpc.ClientConnInterface) ConnectivityServiceClient { + return &connectivityServiceClient{cc} +} + +func (c *connectivityServiceClient) TestConnectivity(ctx context.Context, in *ConnectivityTestRequest, opts ...grpc.CallOption) (*ConnectivityTestResponse, error) { + out := new(ConnectivityTestResponse) + err := c.cc.Invoke(ctx, ConnectivityService_TestConnectivity_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ConnectivityServiceServer is the server API for ConnectivityService service. +// All implementations must embed UnimplementedConnectivityServiceServer +// for forward compatibility +type ConnectivityServiceServer interface { + TestConnectivity(context.Context, *ConnectivityTestRequest) (*ConnectivityTestResponse, error) + mustEmbedUnimplementedConnectivityServiceServer() +} + +// UnimplementedConnectivityServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedConnectivityServiceServer struct {
+}
+
+func (UnimplementedConnectivityServiceServer) TestConnectivity(context.Context, *ConnectivityTestRequest) (*ConnectivityTestResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method TestConnectivity not implemented")
+}
+func (UnimplementedConnectivityServiceServer) mustEmbedUnimplementedConnectivityServiceServer() {}
+
+// UnsafeConnectivityServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ConnectivityServiceServer will
+// result in compilation errors.
+type UnsafeConnectivityServiceServer interface {
+	mustEmbedUnimplementedConnectivityServiceServer()
+}
+
+func RegisterConnectivityServiceServer(s grpc.ServiceRegistrar, srv ConnectivityServiceServer) {
+	s.RegisterService(&ConnectivityService_ServiceDesc, srv)
+}
+
+func _ConnectivityService_TestConnectivity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ConnectivityTestRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ConnectivityServiceServer).TestConnectivity(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: ConnectivityService_TestConnectivity_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ConnectivityServiceServer).TestConnectivity(ctx, req.(*ConnectivityTestRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// ConnectivityService_ServiceDesc is the grpc.ServiceDesc for ConnectivityService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ConnectivityService_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "quilibrium.node.node.pb.ConnectivityService",
+	HandlerType: (*ConnectivityServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "TestConnectivity",
+			Handler: _ConnectivityService_TestConnectivity_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{},
+	Metadata: "node.proto",
+}
+
 const (
 	DataIPCService_Respawn_FullMethodName = "/quilibrium.node.node.pb.DataIPCService/Respawn"
 	DataIPCService_CreateJoinProof_FullMethodName = "/quilibrium.node.node.pb.DataIPCService/CreateJoinProof"
diff --git a/types/consensus/prover_registry.go b/types/consensus/prover_registry.go
index 55efda2..8303fb4 100644
--- a/types/consensus/prover_registry.go
+++ b/types/consensus/prover_registry.go
@@ -68,6 +68,13 @@ type ProverInfo struct {
 	DelegateAddress []byte
 }
 
+// ProverShardSummary aggregates, for a single shard filter, the counts of
+// provers assigned to it broken down by prover status.
+type ProverShardSummary struct {
+	Filter []byte
+	StatusCounts map[ProverStatus]int
+}
+
 // ProverRegistry is an interface for tracking prover information from
 // hypergraph state transitions.
 type ProverRegistry interface {
@@ -117,4 +124,8 @@ type ProverRegistry interface {
 	// shards (i.e., all provers with non-nil filters). This is used for global
 	// coordination and coverage checks.
 	GetAllActiveAppShardProvers() ([]*ProverInfo, error)
+
+	// GetProverShardSummaries returns all shard filters that currently have any
+	// provers assigned (regardless of status) along with their counts.
+ GetProverShardSummaries() ([]*ProverShardSummary, error) } diff --git a/types/hypergraph/id_set.go b/types/hypergraph/id_set.go index 9d5d405..7fd4c15 100644 --- a/types/hypergraph/id_set.go +++ b/types/hypergraph/id_set.go @@ -40,6 +40,13 @@ type IdSet interface { atom Atom, ) error + // AddRaw inserts raw leaf data directly into the backing store without tree + // traversal. This is used for raw sync operations where data is pre-serialized. + AddRaw( + txn tries.TreeBackingStoreTransaction, + leaf *tries.RawLeafData, + ) error + // Delete removes an atom from the ID set. The atom must match the set's atom // type or ErrInvalidAtomType is returned. The atom is removed from the // backing tree store. diff --git a/types/mocks/prover_registry.go b/types/mocks/prover_registry.go index 5043089..06c30b6 100644 --- a/types/mocks/prover_registry.go +++ b/types/mocks/prover_registry.go @@ -30,6 +30,15 @@ func (m *MockProverRegistry) GetAllActiveAppShardProvers() ( return args.Get(0).([]*consensus.ProverInfo), args.Error(1) } +// GetProverShardSummaries implements consensus.ProverRegistry. +func (m *MockProverRegistry) GetProverShardSummaries() ( + []*consensus.ProverShardSummary, + error, +) { + args := m.Called() + return args.Get(0).([]*consensus.ProverShardSummary), args.Error(1) +} + func (m *MockProverRegistry) ProcessStateTransition( state state.State, frameNumber uint64, diff --git a/types/tries/lazy_proof_tree.go b/types/tries/lazy_proof_tree.go index 0d839c5..0fba0e5 100644 --- a/types/tries/lazy_proof_tree.go +++ b/types/tries/lazy_proof_tree.go @@ -426,6 +426,28 @@ type VertexDataIterator interface { Last() bool } +// RawLeafData contains serialized leaf node data for raw sync operations. +type RawLeafData struct { + Key []byte // The leaf key (atom ID) + Value []byte // The atom bytes + HashTarget []byte // Hash target for commitment + Commitment []byte // Pre-computed commitment + Size []byte // Size as big-endian bytes + UnderlyingData []byte // Serialized vertex tree data (if applicable) +} + +// RawLeafIterator provides direct database iteration over leaf nodes, +// bypassing in-memory tree structures for efficient raw sync. +type RawLeafIterator interface { + First() bool + Next() bool + Valid() bool + // Leaf returns the current leaf data. The returned data is only valid + // until the next call to Next() or Close(). + Leaf() (*RawLeafData, error) + Close() error +} + type TreeBackingStore interface { NewTransaction(indexed bool) (TreeBackingStoreTransaction, error) GetNodeByKey( @@ -529,6 +551,24 @@ type TreeBackingStore interface { ) ([]byte, error) GetRootCommits(frameNumber uint64) (map[ShardKey][][]byte, error) NewSnapshot() (TreeBackingStore, func(), error) + // IterateRawLeaves returns an iterator over all leaf nodes for a given + // shard and phase set. This bypasses in-memory tree caching and reads + // directly from the database for raw sync operations. + IterateRawLeaves( + setType string, + phaseType string, + shardKey ShardKey, + ) (RawLeafIterator, error) + // InsertRawLeaf inserts a leaf node directly into the database without + // tree traversal. This is used for raw sync operations where the tree + // structure will be rebuilt later or managed externally. + InsertRawLeaf( + txn TreeBackingStoreTransaction, + setType string, + phaseType string, + shardKey ShardKey, + leaf *RawLeafData, + ) error } // LazyVectorCommitmentTree is a lazy-loaded (from a TreeBackingStore based