v2.1.0.12

This commit is contained in:
Cassandra Heart 2025-11-26 03:22:07 -06:00
parent 54584b0a63
commit c64ab68ce2
No known key found for this signature in database
GPG Key ID: 371083BFA6C240AA
66 changed files with 2428 additions and 1227 deletions

View File

@ -107,6 +107,11 @@ type EngineConfig struct {
DataWorkerP2PMultiaddrs []string `yaml:"dataWorkerP2PMultiaddrs"`
// Configuration to specify data worker stream multiaddrs
DataWorkerStreamMultiaddrs []string `yaml:"dataWorkerStreamMultiaddrs"`
// Configuration to manually override data worker p2p multiaddrs in peer info
DataWorkerAnnounceP2PMultiaddrs []string `yaml:"dataWorkerAnnounceP2PMultiaddrs"`
// Configuration to manually override data worker stream multiaddrs in peer
// info
DataWorkerAnnounceStreamMultiaddrs []string `yaml:"dataWorkerAnnounceStreamMultiaddrs"`
// Number of data worker processes to spawn.
DataWorkerCount int `yaml:"dataWorkerCount"`
// Specific shard filters for the data workers.

View File

@ -22,58 +22,60 @@ const (
)
type P2PConfig struct {
D int `yaml:"d"`
DLo int `yaml:"dLo"`
DHi int `yaml:"dHi"`
DScore int `yaml:"dScore"`
DOut int `yaml:"dOut"`
HistoryLength int `yaml:"historyLength"`
HistoryGossip int `yaml:"historyGossip"`
DLazy int `yaml:"dLazy"`
GossipFactor float64 `yaml:"gossipFactor"`
GossipRetransmission int `yaml:"gossipRetransmission"`
HeartbeatInitialDelay time.Duration `yaml:"heartbeatInitialDelay"`
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`
FanoutTTL time.Duration `yaml:"fanoutTTL"`
PrunePeers int `yaml:"prunePeers"`
PruneBackoff time.Duration `yaml:"pruneBackoff"`
UnsubscribeBackoff time.Duration `yaml:"unsubscribeBackoff"`
Connectors int `yaml:"connectors"`
MaxPendingConnections int `yaml:"maxPendingConnections"`
ConnectionTimeout time.Duration `yaml:"connectionTimeout"`
DirectConnectTicks uint64 `yaml:"directConnectTicks"`
DirectConnectInitialDelay time.Duration `yaml:"directConnectInitialDelay"`
OpportunisticGraftTicks uint64 `yaml:"opportunisticGraftTicks"`
OpportunisticGraftPeers int `yaml:"opportunisticGraftPeers"`
GraftFloodThreshold time.Duration `yaml:"graftFloodThreshold"`
MaxIHaveLength int `yaml:"maxIHaveLength"`
MaxIHaveMessages int `yaml:"maxIHaveMessages"`
MaxIDontWantMessages int `yaml:"maxIDontWantMessages"`
IWantFollowupTime time.Duration `yaml:"iWantFollowupTime"`
IDontWantMessageThreshold int `yaml:"iDontWantMessageThreshold"`
IDontWantMessageTTL int `yaml:"iDontWantMessageTTL"`
BootstrapPeers []string `yaml:"bootstrapPeers"`
ListenMultiaddr string `yaml:"listenMultiaddr"`
StreamListenMultiaddr string `yaml:"streamListenMultiaddr"`
PeerPrivKey string `yaml:"peerPrivKey"`
TraceLogFile string `yaml:"traceLogFile"`
TraceLogStdout bool `yaml:"traceLogStdout"`
Network uint8 `yaml:"network"`
LowWatermarkConnections int `yaml:"lowWatermarkConnections"`
HighWatermarkConnections int `yaml:"highWatermarkConnections"`
DirectPeers []string `yaml:"directPeers"`
GRPCServerRateLimit int `yaml:"grpcServerRateLimit"`
MinBootstrapPeers int `yaml:"minBootstrapPeers"`
BootstrapParallelism int `yaml:"bootstrapParallelism"`
DiscoveryParallelism int `yaml:"discoveryParallelism"`
DiscoveryPeerLookupLimit int `yaml:"discoveryPeerLookupLimit"`
PingTimeout time.Duration `yaml:"pingTimeout"`
PingPeriod time.Duration `yaml:"pingPeriod"`
PingAttempts int `yaml:"pingAttempts"`
ValidateQueueSize int `yaml:"validateQueueSize"`
ValidateWorkers int `yaml:"validateWorkers"`
SubscriptionQueueSize int `yaml:"subscriptionQueueSize"`
PeerOutboundQueueSize int `yaml:"peerOutboundQueueSize"`
D int `yaml:"d"`
DLo int `yaml:"dLo"`
DHi int `yaml:"dHi"`
DScore int `yaml:"dScore"`
DOut int `yaml:"dOut"`
HistoryLength int `yaml:"historyLength"`
HistoryGossip int `yaml:"historyGossip"`
DLazy int `yaml:"dLazy"`
GossipFactor float64 `yaml:"gossipFactor"`
GossipRetransmission int `yaml:"gossipRetransmission"`
HeartbeatInitialDelay time.Duration `yaml:"heartbeatInitialDelay"`
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`
FanoutTTL time.Duration `yaml:"fanoutTTL"`
PrunePeers int `yaml:"prunePeers"`
PruneBackoff time.Duration `yaml:"pruneBackoff"`
UnsubscribeBackoff time.Duration `yaml:"unsubscribeBackoff"`
Connectors int `yaml:"connectors"`
MaxPendingConnections int `yaml:"maxPendingConnections"`
ConnectionTimeout time.Duration `yaml:"connectionTimeout"`
DirectConnectTicks uint64 `yaml:"directConnectTicks"`
DirectConnectInitialDelay time.Duration `yaml:"directConnectInitialDelay"`
OpportunisticGraftTicks uint64 `yaml:"opportunisticGraftTicks"`
OpportunisticGraftPeers int `yaml:"opportunisticGraftPeers"`
GraftFloodThreshold time.Duration `yaml:"graftFloodThreshold"`
MaxIHaveLength int `yaml:"maxIHaveLength"`
MaxIHaveMessages int `yaml:"maxIHaveMessages"`
MaxIDontWantMessages int `yaml:"maxIDontWantMessages"`
IWantFollowupTime time.Duration `yaml:"iWantFollowupTime"`
IDontWantMessageThreshold int `yaml:"iDontWantMessageThreshold"`
IDontWantMessageTTL int `yaml:"iDontWantMessageTTL"`
BootstrapPeers []string `yaml:"bootstrapPeers"`
ListenMultiaddr string `yaml:"listenMultiaddr"`
StreamListenMultiaddr string `yaml:"streamListenMultiaddr"`
AnnounceListenMultiaddr string `yaml:"announceListenMultiaddr"`
AnnounceStreamListenMultiaddr string `yaml:"announceStreamListenMultiaddr"`
PeerPrivKey string `yaml:"peerPrivKey"`
TraceLogFile string `yaml:"traceLogFile"`
TraceLogStdout bool `yaml:"traceLogStdout"`
Network uint8 `yaml:"network"`
LowWatermarkConnections int `yaml:"lowWatermarkConnections"`
HighWatermarkConnections int `yaml:"highWatermarkConnections"`
DirectPeers []string `yaml:"directPeers"`
GRPCServerRateLimit int `yaml:"grpcServerRateLimit"`
MinBootstrapPeers int `yaml:"minBootstrapPeers"`
BootstrapParallelism int `yaml:"bootstrapParallelism"`
DiscoveryParallelism int `yaml:"discoveryParallelism"`
DiscoveryPeerLookupLimit int `yaml:"discoveryPeerLookupLimit"`
PingTimeout time.Duration `yaml:"pingTimeout"`
PingPeriod time.Duration `yaml:"pingPeriod"`
PingAttempts int `yaml:"pingAttempts"`
ValidateQueueSize int `yaml:"validateQueueSize"`
ValidateWorkers int `yaml:"validateWorkers"`
SubscriptionQueueSize int `yaml:"subscriptionQueueSize"`
PeerOutboundQueueSize int `yaml:"peerOutboundQueueSize"`
}
// WithDefaults returns a copy of the P2PConfig with any missing fields set to

View File

@ -43,7 +43,7 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x0b
return 0x0c
}
func GetRCNumber() byte {

View File

@ -111,8 +111,8 @@ func (hg *HypergraphCRDT) GetHyperedge(id [64]byte) (
hypergraph.Hyperedge,
error,
) {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
return hg.getHyperedge(id)
}
@ -346,8 +346,8 @@ func (hg *HypergraphCRDT) RevertRemoveHyperedge(
// LookupHyperedge checks if a hyperedge exists in the hypergraph. Returns true
// if the hyperedge is in the add set and not in the remove set.
func (hg *HypergraphCRDT) LookupHyperedge(h hypergraph.Hyperedge) bool {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
return hg.lookupHyperedge(h)
}

View File

@ -171,8 +171,8 @@ func (hg *HypergraphCRDT) GetSize(
shardKey *tries.ShardKey,
path []int,
) *big.Int {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
if shardKey == nil {
sk := tries.ShardKey{
L1: [3]byte{0, 0, 0},

View File

@ -502,8 +502,8 @@ func (hg *HypergraphCRDT) CreateTraversalProof(
phaseType hypergraph.PhaseType,
keys [][]byte,
) (*tries.TraversalProof, error) {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
timer := prometheus.NewTimer(TraversalProofDuration.WithLabelValues("create"))
defer timer.ObserveDuration()
@ -564,8 +564,8 @@ func (hg *HypergraphCRDT) VerifyTraversalProof(
root []byte,
traversalProof *tries.TraversalProof,
) (bool, error) {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
timer := prometheus.NewTimer(TraversalProofDuration.WithLabelValues("verify"))
defer timer.ObserveDuration()

View File

@ -26,8 +26,8 @@ func (hg *HypergraphCRDT) HyperStream(
return errors.New("unavailable")
}
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
defer hg.syncController.EndSyncSession()
peerId, err := hg.authenticationProvider.Identify(stream.Context())
@ -63,8 +63,8 @@ func (hg *HypergraphCRDT) Sync(
return errors.New("unavailable")
}
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
defer hg.syncController.EndSyncSession()
hg.logger.Info(

View File

@ -81,8 +81,8 @@ func (v *vertex) Commit(prover crypto.InclusionProver) []byte {
// GetVertex retrieves a vertex by its ID. Returns ErrRemoved if the vertex has
// been removed, or an error if not found.
func (hg *HypergraphCRDT) GetVertex(id [64]byte) (hypergraph.Vertex, error) {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
timer := prometheus.NewTimer(GetDuration.WithLabelValues("vertex"))
defer timer.ObserveDuration()
@ -287,8 +287,8 @@ func (hg *HypergraphCRDT) RevertRemoveVertex(
// LookupVertex checks if a vertex exists in the hypergraph. Returns true if the
// vertex is in the add set and not in the remove set.
func (hg *HypergraphCRDT) LookupVertex(v hypergraph.Vertex) bool {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
return hg.lookupVertex(v)
}

View File

@ -29,8 +29,8 @@ func (hg *HypergraphCRDT) GetVertexData(id [64]byte) (
*tries.VectorCommitmentTree,
error,
) {
hg.mu.RLock()
defer hg.mu.RUnlock()
hg.mu.Lock()
defer hg.mu.Unlock()
timer := prometheus.NewTimer(GetDuration.WithLabelValues("vertex_data"))
defer timer.ObserveDuration()

View File

@ -3,6 +3,7 @@ package app
import (
"fmt"
"os"
"sync"
"go.uber.org/zap"
consensustime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
@ -25,6 +26,7 @@ type DataWorkerNode struct {
globalTimeReel *consensustime.GlobalTimeReel
parentProcess int
quit chan struct{}
stopOnce sync.Once
}
func newDataWorkerNode(
@ -68,7 +70,7 @@ func (n *DataWorkerNode) Start(
"error while starting ipc server for core",
zap.Uint64("core", uint64(n.coreId)),
)
n.quit <- struct{}{}
n.Stop()
}
}()
@ -99,11 +101,12 @@ func (n *DataWorkerNode) Start(
}
func (n *DataWorkerNode) Stop() {
n.logger.Info("stopping data worker node")
if n.quit != nil {
close(n.quit)
}
n.stopOnce.Do(func() {
n.logger.Info("stopping data worker node")
if n.quit != nil {
close(n.quit)
}
})
}
// GetQuitChannel returns the quit channel for external signaling

View File

@ -20,6 +20,7 @@ import (
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"source.quilibrium.com/quilibrium/monorepo/config"
"source.quilibrium.com/quilibrium/monorepo/consensus"
@ -110,6 +111,8 @@ type AppConsensusEngine struct {
currentDifficultyMu sync.RWMutex
messageCollectors *keyedaggregator.SequencedCollectors[sequencedAppMessage]
messageAggregator *keyedaggregator.SequencedAggregator[sequencedAppMessage]
appMessageSpillover map[uint64][]*protobufs.Message
appSpilloverMu sync.Mutex
lastProposalRank uint64
lastProposalRankMu sync.RWMutex
collectedMessages []*protobufs.Message
@ -261,6 +264,7 @@ func NewAppConsensusEngine(
proofCache: make(map[uint64][516]byte),
collectedMessages: []*protobufs.Message{},
provingMessages: []*protobufs.Message{},
appMessageSpillover: make(map[uint64][]*protobufs.Message),
consensusMessageQueue: make(chan *pb.Message, 1000),
proverMessageQueue: make(chan *pb.Message, 1000),
frameMessageQueue: make(chan *pb.Message, 100),
@ -900,77 +904,86 @@ func (e *AppConsensusEngine) materialize(
var state state.State
state = hgstate.NewHypergraphState(e.hypergraph)
eg := errgroup.Group{}
eg.SetLimit(len(frame.Requests))
for i, request := range frame.Requests {
e.logger.Debug(
"processing request",
zap.Int("message_index", i),
)
requestBytes, err := request.ToCanonicalBytes()
if err != nil {
e.logger.Error(
"error serializing request",
zap.Int("message_index", i),
zap.Error(err),
)
return errors.Wrap(err, "materialize")
}
if len(requestBytes) == 0 {
e.logger.Error(
"empty request bytes",
eg.Go(func() error {
e.logger.Debug(
"processing request",
zap.Int("message_index", i),
)
return errors.Wrap(errors.New("empty request"), "materialize")
}
costBasis, err := e.executionManager.GetCost(requestBytes)
if err != nil {
e.logger.Error(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
requestBytes, err := request.ToCanonicalBytes()
if err != nil {
e.logger.Error(
"error serializing request",
zap.Int("message_index", i),
zap.Error(err),
)
return errors.Wrap(err, "materialize")
}
if len(requestBytes) == 0 {
e.logger.Error(
"empty request bytes",
zap.Int("message_index", i),
)
return errors.Wrap(errors.New("empty request"), "materialize")
}
costBasis, err := e.executionManager.GetCost(requestBytes)
if err != nil {
e.logger.Error(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
)
return errors.Wrap(err, "materialize")
}
e.currentDifficultyMu.RLock()
difficulty := uint64(e.currentDifficulty)
e.currentDifficultyMu.RUnlock()
var baseline *big.Int
if costBasis.Cmp(big.NewInt(0)) == 0 {
baseline = big.NewInt(0)
} else {
baseline = reward.GetBaselineFee(
difficulty,
e.hypergraph.GetSize(nil, nil).Uint64(),
costBasis.Uint64(),
8000000000,
)
baseline.Quo(baseline, costBasis)
}
_, err = e.executionManager.ProcessMessage(
frame.Header.FrameNumber,
new(big.Int).Mul(
baseline,
big.NewInt(int64(frame.Header.FeeMultiplierVote)),
),
e.appAddress[:32],
requestBytes,
state,
)
return errors.Wrap(err, "materialize")
}
if err != nil {
e.logger.Error(
"error processing message",
zap.Int("message_index", i),
zap.Error(err),
)
return errors.Wrap(err, "materialize")
}
e.currentDifficultyMu.RLock()
difficulty := uint64(e.currentDifficulty)
e.currentDifficultyMu.RUnlock()
var baseline *big.Int
if costBasis.Cmp(big.NewInt(0)) == 0 {
baseline = big.NewInt(0)
} else {
baseline = reward.GetBaselineFee(
difficulty,
e.hypergraph.GetSize(nil, nil).Uint64(),
costBasis.Uint64(),
8000000000,
)
baseline.Quo(baseline, costBasis)
}
return nil
})
}
result, err := e.executionManager.ProcessMessage(
frame.Header.FrameNumber,
new(big.Int).Mul(
baseline,
big.NewInt(int64(frame.Header.FeeMultiplierVote)),
),
e.appAddress[:32],
requestBytes,
state,
)
if err != nil {
e.logger.Error(
"error processing message",
zap.Int("message_index", i),
zap.Error(err),
)
return errors.Wrap(err, "materialize")
}
state = result.State
if err := eg.Wait(); err != nil {
return err
}
e.logger.Debug(
@ -1760,9 +1773,37 @@ func (e *AppConsensusEngine) OnOwnProposal(
proposal.PreviousRankTimeoutCertificate.(*protobufs.TimeoutCertificate)
}
provers, err := e.proverRegistry.GetActiveProvers(e.appAddress)
if err != nil {
e.logger.Error("could not get provers", zap.Error(err))
return
}
var signingProverPubKey []byte
var signingProverIndex int
for i, prover := range provers {
if bytes.Equal(
prover.Address,
(*proposal.Vote).PublicKeySignatureBls48581.Address,
) {
signingProverIndex = i
signingProverPubKey = prover.PublicKey
break
}
}
bitmask := make([]byte, (len(provers)+7)/8)
bitmask[signingProverIndex/8] = 1 << (signingProverIndex % 8)
// Manually override the signature as the vdf prover's signature is invalid
(*proposal.State.State).Header.PublicKeySignatureBls48581.Signature =
(*proposal.Vote).PublicKeySignatureBls48581.Signature
(*proposal.State.State).Header.PublicKeySignatureBls48581 =
&protobufs.BLS48581AggregateSignature{
PublicKey: &protobufs.BLS48581G2PublicKey{
KeyValue: signingProverPubKey,
},
Signature: (*proposal.Vote).PublicKeySignatureBls48581.Signature,
Bitmask: bitmask,
}
pbProposal := &protobufs.AppShardProposal{
State: *proposal.State.State,

View File

@ -69,12 +69,12 @@ func (p *AppLeaderProvider) ProveNextState(
p.engine.appAddress,
)
if err != nil {
frameProvingTotal.WithLabelValues("error").Inc()
frameProvingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc()
return nil, models.NewNoVoteErrorf("could not collect: %+w", err)
}
if prior == nil {
frameProvingTotal.WithLabelValues("error").Inc()
frameProvingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc()
return nil, models.NewNoVoteErrorf("missing prior frame")
}
@ -89,7 +89,7 @@ func (p *AppLeaderProvider) ProveNextState(
}
if prior.Identity() != priorState {
frameProvingTotal.WithLabelValues("error").Inc()
frameProvingTotal.WithLabelValues(p.engine.appAddressHex, "error").Inc()
if latestQC != nil && latestQC.Identity() == priorState {
switch {

View File

@ -49,6 +49,8 @@ func (e *AppConsensusEngine) eventDistributorLoop(
zap.Uint64("frame_number", data.Frame.Header.FrameNumber),
)
e.flushDeferredAppMessages(data.Frame.GetRank() + 1)
// Record the fee vote from the accepted frame
if err := e.dynamicFeeManager.AddFrameFeeVote(
e.appAddress,

View File

@ -65,6 +65,11 @@ type mockAppIntegrationPubSub struct {
underlyingBlossomSub *qp2p.BlossomSub
}
// Close implements p2p.PubSub.
// Test double: intentionally unimplemented — panics so any code under test
// that actually closes the pubsub fails loudly instead of silently passing.
func (m *mockAppIntegrationPubSub) Close() error {
panic("unimplemented")
}
// GetOwnMultiaddrs implements p2p.PubSub.
func (m *mockAppIntegrationPubSub) GetOwnMultiaddrs() []multiaddr.Multiaddr {
panic("unimplemented")
@ -576,6 +581,9 @@ func (m *mockGlobalClientLocks) GetAppShards(ctx context.Context, in *protobufs.
func (m *mockGlobalClientLocks) GetGlobalShards(ctx context.Context, in *protobufs.GetGlobalShardsRequest, opts ...grpc.CallOption) (*protobufs.GetGlobalShardsResponse, error) {
return nil, errors.New("not used in this test")
}
// GetGlobalProposal is a stub satisfying the global service client interface;
// it always returns an error because the tests using this mock never call it
// (mirroring the sibling stubs such as GetGlobalShards and GetWorkerInfo).
func (m *mockGlobalClientLocks) GetGlobalProposal(ctx context.Context, in *protobufs.GetGlobalProposalRequest, opts ...grpc.CallOption) (*protobufs.GlobalProposalResponse, error) {
return nil, errors.New("not used in this test")
}
func (m *mockGlobalClientLocks) GetWorkerInfo(ctx context.Context, in *protobufs.GlobalGetWorkerInfoRequest, opts ...grpc.CallOption) (*protobufs.GlobalGetWorkerInfoResponse, error) {
return nil, errors.New("not used in this test")
}

View File

@ -3,6 +3,7 @@ package app
import (
"fmt"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto"
@ -141,6 +142,7 @@ func (p *appMessageProcessor) enforceCollectorLimit(
if len(collector.Records()) >= maxAppMessagesPerRank {
collector.Remove(record)
p.engine.deferAppMessage(p.rank+1, record.message)
return keyedcollector.NewInvalidRecordError(
record,
fmt.Errorf("message limit reached for rank %d", p.rank),
@ -163,13 +165,13 @@ func (e *AppConsensusEngine) initAppMessageAggregator() error {
return err
}
e.messageCollectors = keyedaggregator.NewSequencedCollectors[sequencedAppMessage](
e.messageCollectors = keyedaggregator.NewSequencedCollectors(
tracer,
0,
collectorFactory,
)
aggregator, err := keyedaggregator.NewSequencedAggregator[sequencedAppMessage](
aggregator, err := keyedaggregator.NewSequencedAggregator(
tracer,
0,
e.messageCollectors,
@ -267,3 +269,60 @@ func (e *AppConsensusEngine) updatePendingMessagesGauge(rank uint64) {
float64(len(collector.Records())),
)
}
// deferAppMessage queues a deep copy of message for replay once consensus
// reaches targetRank. It is called when a rank's collector hits its message
// limit, so the message is spilled over instead of dropped. Nil engine, nil
// message, or a zero target rank are silently ignored.
func (e *AppConsensusEngine) deferAppMessage(
	targetRank uint64,
	message *protobufs.Message,
) {
	// Guard: nothing to defer for a nil engine/message or the sentinel rank 0.
	if e == nil || message == nil || targetRank == 0 {
		return
	}

	// Clone so later mutation of the caller's message cannot corrupt the queue.
	copied := proto.Clone(message).(*protobufs.Message)

	e.appSpilloverMu.Lock()
	queue := append(e.appMessageSpillover[targetRank], copied)
	e.appMessageSpillover[targetRank] = queue
	queued := len(queue)
	e.appSpilloverMu.Unlock()

	if e.logger == nil {
		return
	}
	e.logger.Debug(
		"deferred app message due to collector limit",
		zap.String("app_address", e.appAddressHex),
		zap.Uint64("target_rank", targetRank),
		zap.Int("pending", queued),
	)
}
// flushDeferredAppMessages replays every message previously deferred for
// targetRank by deferAppMessage, feeding each one back into the sequenced
// message aggregator. The batch is detached from the spillover map under the
// lock and replayed outside it, so aggregation cannot block other deferrals.
func (e *AppConsensusEngine) flushDeferredAppMessages(targetRank uint64) {
	// Guard: no engine, no aggregator, or sentinel rank 0 — nothing to flush.
	if e == nil || e.messageAggregator == nil || targetRank == 0 {
		return
	}

	// Detach this rank's batch while holding the lock.
	e.appSpilloverMu.Lock()
	batch := e.appMessageSpillover[targetRank]
	if len(batch) > 0 {
		delete(e.appMessageSpillover, targetRank)
	}
	e.appSpilloverMu.Unlock()

	if len(batch) == 0 {
		return
	}

	// Replay outside the lock.
	for _, deferred := range batch {
		e.messageAggregator.Add(newSequencedAppMessage(targetRank, deferred))
	}

	if e.logger == nil {
		return
	}
	e.logger.Debug(
		"replayed deferred app messages",
		zap.String("app_address", e.appAddressHex),
		zap.Uint64("target_rank", targetRank),
		zap.Int("count", len(batch)),
	)
}

View File

@ -1134,13 +1134,15 @@ func (e *AppConsensusEngine) validatePeerInfoSignature(
// Create a copy of the peer info without the signature for validation
infoCopy := &protobufs.PeerInfo{
PeerId: peerInfo.PeerId,
Reachability: peerInfo.Reachability,
Timestamp: peerInfo.Timestamp,
Version: peerInfo.Version,
PatchNumber: peerInfo.PatchNumber,
Capabilities: peerInfo.Capabilities,
PublicKey: peerInfo.PublicKey,
PeerId: peerInfo.PeerId,
Reachability: peerInfo.Reachability,
Timestamp: peerInfo.Timestamp,
Version: peerInfo.Version,
PatchNumber: peerInfo.PatchNumber,
Capabilities: peerInfo.Capabilities,
PublicKey: peerInfo.PublicKey,
LastReceivedFrame: peerInfo.LastReceivedFrame,
LastGlobalHeadFrame: peerInfo.LastGlobalHeadFrame,
// Exclude Signature field
}

View File

@ -250,7 +250,7 @@ func (e *AppConsensusEngine) peerAuthCacheAllows(id peer.ID) bool {
if time.Now().After(expiry) {
e.peerAuthCacheMu.Lock()
if current, exists := e.peerAuthCache[string(id)]; exists &&
current == expiry {
current.Equal(expiry) {
delete(e.peerAuthCache, string(id))
}
e.peerAuthCacheMu.Unlock()

View File

@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
typesconsensus "source.quilibrium.com/quilibrium/monorepo/types/consensus"
)
@ -108,7 +109,12 @@ func (e *GlobalConsensusEngine) checkShardCoverage(frameNumber uint64) error {
return errors.Wrap(err, "check shard coverage")
}
remaining := int(haltGraceFrames - streak.Count)
var remaining int
if frameNumber < token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END+360 {
remaining = int(haltGraceFrames + 720 - streak.Count)
} else {
remaining = int(haltGraceFrames - streak.Count)
}
if remaining <= 0 {
e.logger.Error(
"CRITICAL: Shard has insufficient coverage - triggering network halt",

View File

@ -73,11 +73,14 @@ func (e *GlobalConsensusEngine) eventDistributorLoop(
// New global frame has been selected as the head by the time reel
if data, ok := event.Data.(*consensustime.GlobalEvent); ok &&
data.Frame != nil {
e.lastObservedFrame.Store(data.Frame.Header.FrameNumber)
e.logger.Info(
"received new global head event",
zap.Uint64("frame_number", data.Frame.Header.FrameNumber),
)
e.flushDeferredGlobalMessages(data.Frame.GetRank() + 1)
// Check shard coverage
if err := e.checkShardCoverage(
data.Frame.Header.FrameNumber,
@ -103,12 +106,20 @@ func (e *GlobalConsensusEngine) eventDistributorLoop(
if len(workers) == 0 {
e.logger.Error("no workers detected for allocation")
}
allocated := true
allAllocated := true
needsProposals := false
for _, w := range workers {
allocated = allocated && w.Allocated
allAllocated = allAllocated && w.Allocated
if len(w.Filter) == 0 {
needsProposals = true
}
}
if !allocated {
e.evaluateForProposals(ctx, data)
if needsProposals || !allAllocated {
e.evaluateForProposals(ctx, data, needsProposals)
} else {
self, effectiveSeniority := e.allocationContext()
e.checkExcessPendingJoins(self, data.Frame.Header.FrameNumber)
e.logAllocationStatusOnly(ctx, data, self, effectiveSeniority)
}
}
}
@ -376,22 +387,147 @@ func (e *GlobalConsensusEngine) estimateSeniorityFromConfig() uint64 {
func (e *GlobalConsensusEngine) evaluateForProposals(
ctx context.Context,
data *consensustime.GlobalEvent,
allowProposals bool,
) {
self, err := e.proverRegistry.GetProverInfo(e.getProverAddress())
var effectiveSeniority uint64
if err != nil || self == nil {
effectiveSeniority = e.estimateSeniorityFromConfig()
} else {
effectiveSeniority = self.Seniority
self, effectiveSeniority := e.allocationContext()
e.checkExcessPendingJoins(self, data.Frame.Header.FrameNumber)
canPropose, skipReason := e.joinProposalReady(data.Frame.Header.FrameNumber)
snapshot, ok := e.collectAllocationSnapshot(
ctx,
data,
self,
effectiveSeniority,
)
if !ok {
return
}
pendingFilters := [][]byte{}
proposalDescriptors := []provers.ShardDescriptor{}
decideDescriptors := []provers.ShardDescriptor{}
e.logAllocationStatus(snapshot)
pendingFilters := snapshot.pendingFilters
proposalDescriptors := snapshot.proposalDescriptors
decideDescriptors := snapshot.decideDescriptors
worldBytes := snapshot.worldBytes
if len(proposalDescriptors) != 0 && allowProposals {
if canPropose {
proposals, err := e.proposer.PlanAndAllocate(
uint64(data.Frame.Header.Difficulty),
proposalDescriptors,
100,
worldBytes,
)
if err != nil {
e.logger.Error("could not plan shard allocations", zap.Error(err))
} else {
if len(proposals) > 0 {
e.lastJoinAttemptFrame.Store(data.Frame.Header.FrameNumber)
}
expectedRewardSum := big.NewInt(0)
for _, p := range proposals {
expectedRewardSum.Add(expectedRewardSum, p.ExpectedReward)
}
raw := decimal.NewFromBigInt(expectedRewardSum, 0)
rewardInQuilPerInterval := raw.Div(decimal.NewFromInt(8000000000))
rewardInQuilPerDay := rewardInQuilPerInterval.Mul(
decimal.NewFromInt(24 * 60 * 6),
)
e.logger.Info(
"proposed joins",
zap.Int("shard_proposals", len(proposals)),
zap.String(
"estimated_reward_per_interval",
rewardInQuilPerInterval.String(),
),
zap.String(
"estimated_reward_per_day",
rewardInQuilPerDay.String(),
),
)
}
} else {
e.logger.Info(
"skipping join proposals",
zap.String("reason", skipReason),
zap.Uint64("frame_number", data.Frame.Header.FrameNumber),
)
}
} else if len(proposalDescriptors) != 0 && !allowProposals {
e.logger.Info(
"skipping join proposals",
zap.String("reason", "all workers already assigned filters"),
zap.Uint64("frame_number", data.Frame.Header.FrameNumber),
)
}
if len(pendingFilters) != 0 {
if err := e.proposer.DecideJoins(
uint64(data.Frame.Header.Difficulty),
decideDescriptors,
pendingFilters,
worldBytes,
); err != nil {
e.logger.Error("could not decide shard allocations", zap.Error(err))
} else {
e.logger.Info(
"decided on joins",
zap.Int("joins", len(pendingFilters)),
)
}
}
}
// allocationSnapshot is a point-in-time view of this prover's shard
// allocation state, built by collectAllocationSnapshot so that status
// logging and join/leave proposal decisions share one consistent data set.
type allocationSnapshot struct {
// Joins proposed but not yet confirmed (logged as "pending_joins").
shardsPending int
// Frame numbers the pending joins are awaiting, pre-formatted as strings
// (logged joined with ", " as "pending_join_frames").
awaitingFrames []string
// Leaves in flight (logged as "pending_leaves").
shardsLeaving int
// Shards this prover is actively proving (logged as "active").
shardsActive int
// Shards currently paused (logged as "paused").
shardsPaused int
// Network-wide shard division count (logged as "network_shards").
shardDivisions int
// Network-wide logical shard count (logged as "network_logical_shards").
logicalShards int
// Filters for joins awaiting a decision; fed to proposer.DecideJoins.
pendingFilters [][]byte
// Candidate shards for new join proposals; fed to proposer.PlanAndAllocate.
proposalDescriptors []provers.ShardDescriptor
// Descriptors used when deciding on the pending joins.
decideDescriptors []provers.ShardDescriptor
// Total data size across all observed shards, accumulated from each
// shard's reported Size.
worldBytes *big.Int
}
// statusFields renders the snapshot's allocation counters as zap fields for
// the recurring "status for allocations" log line. A nil receiver yields nil
// so callers may log unconditionally.
func (s *allocationSnapshot) statusFields() []zap.Field {
if s == nil {
return nil
}
return []zap.Field{
zap.Int("pending_joins", s.shardsPending),
zap.String("pending_join_frames", strings.Join(s.awaitingFrames, ", ")),
zap.Int("pending_leaves", s.shardsLeaving),
zap.Int("active", s.shardsActive),
zap.Int("paused", s.shardsPaused),
zap.Int("network_shards", s.shardDivisions),
zap.Int("network_logical_shards", s.logicalShards),
}
}
// proposalSnapshotFields renders the sizes of the proposal-related slices as
// zap fields for the debug-level "proposal evaluation snapshot" log line.
// A nil receiver yields nil so callers may log unconditionally.
func (s *allocationSnapshot) proposalSnapshotFields() []zap.Field {
if s == nil {
return nil
}
return []zap.Field{
zap.Int("proposal_candidates", len(s.proposalDescriptors)),
zap.Int("pending_confirmations", len(s.pendingFilters)),
zap.Int("decide_descriptors", len(s.decideDescriptors)),
}
}
func (e *GlobalConsensusEngine) collectAllocationSnapshot(
ctx context.Context,
data *consensustime.GlobalEvent,
self *typesconsensus.ProverInfo,
effectiveSeniority uint64,
) (*allocationSnapshot, bool) {
appShards, err := e.shardsStore.RangeAppShards()
if err != nil {
e.logger.Error("could not obtain app shard info", zap.Error(err))
return
return nil, false
}
// consolidate into high level L2 shards:
@ -417,24 +553,24 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
hex.EncodeToString(data.Frame.Header.Prover),
),
)
return
return nil, false
}
if registry.IdentityKey == nil || registry.IdentityKey.KeyValue == nil {
e.logger.Info("key registry info missing identity of prover")
return
return nil, false
}
pub, err := pcrypto.UnmarshalEd448PublicKey(registry.IdentityKey.KeyValue)
if err != nil {
e.logger.Warn("error unmarshaling identity key", zap.Error(err))
return
return nil, false
}
peerId, err := peer.IDFromPublicKey(pub)
if err != nil {
e.logger.Warn("error deriving peer id", zap.Error(err))
return
return nil, false
}
info := e.peerInfoManager.GetPeerInfo([]byte(peerId))
@ -443,7 +579,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
"no peer info known yet",
zap.String("peer", peer.ID(peerId).String()),
)
return
return nil, false
}
if len(info.Reachability) == 0 {
@ -451,11 +587,12 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
"no reachability info known yet",
zap.String("peer", peer.ID(peerId).String()),
)
return
return nil, false
}
var client protobufs.GlobalServiceClient = nil
for _, s := range info.Reachability[0].StreamMultiaddrs {
if len(info.Reachability[0].StreamMultiaddrs) > 0 {
s := info.Reachability[0].StreamMultiaddrs[0]
creds, err := p2p.NewPeerAuthenticator(
e.logger,
e.config.P2P,
@ -468,17 +605,17 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
map[string]channel.AllowedPeerPolicyType{},
).CreateClientTLSCredentials([]byte(peerId))
if err != nil {
return
return nil, false
}
ma, err := multiaddr.StringCast(s)
if err != nil {
return
return nil, false
}
mga, err := mn.ToNetAddr(ma)
if err != nil {
return
return nil, false
}
cc, err := grpc.NewClient(
@ -492,7 +629,7 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
zap.String("multiaddr", ma.String()),
zap.Error(err),
)
return
return nil, false
}
defer func() {
if err := cc.Close(); err != nil {
@ -501,12 +638,11 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
}()
client = protobufs.NewGlobalServiceClient(cc)
break
}
if client == nil {
e.logger.Debug("could not get app shards from prover")
return
return nil, false
}
worldBytes := big.NewInt(0)
@ -517,20 +653,24 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
logicalShards := 0
shardDivisions := 0
awaitingFrame := map[uint64]struct{}{}
for _, info := range shards {
pendingFilters := [][]byte{}
proposalDescriptors := []provers.ShardDescriptor{}
decideDescriptors := []provers.ShardDescriptor{}
for _, shardInfo := range shards {
resp, err := e.getAppShardsFromProver(
client,
slices.Concat(info.L1, info.L2),
slices.Concat(shardInfo.L1, shardInfo.L2),
)
if err != nil {
e.logger.Debug("could not get app shards from prover", zap.Error(err))
return
return nil, false
}
for _, shard := range resp.Info {
shardDivisions++
worldBytes = worldBytes.Add(worldBytes, new(big.Int).SetBytes(shard.Size))
bp := slices.Clone(info.L2)
bp := slices.Clone(shardInfo.L2)
for _, p := range shard.Prefix {
bp = append(bp, byte(p))
}
@ -615,66 +755,95 @@ func (e *GlobalConsensusEngine) evaluateForProposals(
awaitingFrames = append(awaitingFrames, fmt.Sprintf("%d", frame))
}
return &allocationSnapshot{
shardsPending: shardsPending,
awaitingFrames: awaitingFrames,
shardsLeaving: shardsLeaving,
shardsActive: shardsActive,
shardsPaused: shardsPaused,
shardDivisions: shardDivisions,
logicalShards: logicalShards,
pendingFilters: pendingFilters,
proposalDescriptors: proposalDescriptors,
decideDescriptors: decideDescriptors,
worldBytes: worldBytes,
}, true
}
func (e *GlobalConsensusEngine) logAllocationStatus(
snapshot *allocationSnapshot,
) {
if snapshot == nil {
return
}
e.logger.Info(
"status for allocations",
zap.Int("pending_joins", shardsPending),
zap.String("pending_join_frames", strings.Join(awaitingFrames, ", ")),
zap.Int("pending_leaves", shardsLeaving),
zap.Int("active", shardsActive),
zap.Int("paused", shardsPaused),
zap.Int("network_shards", shardDivisions),
zap.Int("network_logical_shards", logicalShards),
snapshot.statusFields()...,
)
if len(proposalDescriptors) != 0 {
proposals, err := e.proposer.PlanAndAllocate(
uint64(data.Frame.Header.Difficulty),
proposalDescriptors,
100,
worldBytes,
e.logger.Debug(
"proposal evaluation snapshot",
snapshot.proposalSnapshotFields()...,
)
}
func (e *GlobalConsensusEngine) logAllocationStatusOnly(
ctx context.Context,
data *consensustime.GlobalEvent,
self *typesconsensus.ProverInfo,
effectiveSeniority uint64,
) {
snapshot, ok := e.collectAllocationSnapshot(
ctx,
data,
self,
effectiveSeniority,
)
if !ok || snapshot == nil {
e.logger.Info(
"all workers already allocated or pending; skipping proposal cycle",
)
if err != nil {
e.logger.Error("could not plan shard allocations", zap.Error(err))
} else {
expectedRewardSum := big.NewInt(0)
for _, p := range proposals {
expectedRewardSum.Add(expectedRewardSum, p.ExpectedReward)
}
raw := decimal.NewFromBigInt(expectedRewardSum, 0)
rewardInQuilPerInterval := raw.Div(decimal.NewFromInt(8000000000))
rewardInQuilPerDay := rewardInQuilPerInterval.Mul(
decimal.NewFromInt(24 * 60 * 6),
)
e.logger.Info(
"proposed joins",
zap.Int("shard_proposals", len(proposals)),
zap.String(
"estimated_reward_per_interval",
rewardInQuilPerInterval.String(),
),
zap.String(
"estimated_reward_per_day",
rewardInQuilPerDay.String(),
),
)
}
return
}
if len(pendingFilters) != 0 {
err = e.proposer.DecideJoins(
uint64(data.Frame.Header.Difficulty),
decideDescriptors,
pendingFilters,
worldBytes,
e.logger.Info(
"all workers already allocated or pending; skipping proposal cycle",
snapshot.statusFields()...,
)
e.logAllocationStatus(snapshot)
}
func (e *GlobalConsensusEngine) allocationContext() (
*typesconsensus.ProverInfo,
uint64,
) {
self, err := e.proverRegistry.GetProverInfo(e.getProverAddress())
if err != nil || self == nil {
return nil, e.estimateSeniorityFromConfig()
}
return self, self.Seniority
}
func (e *GlobalConsensusEngine) checkExcessPendingJoins(
self *typesconsensus.ProverInfo,
frameNumber uint64,
) {
excessFilters := e.selectExcessPendingFilters(self)
if len(excessFilters) != 0 {
e.logger.Debug(
"identified excess pending joins",
zap.Int("excess_count", len(excessFilters)),
zap.Uint64("frame_number", frameNumber),
)
if err != nil {
e.logger.Error("could not decide shard allocations", zap.Error(err))
} else {
e.logger.Info(
"decided on joins",
zap.Int("joins", len(pendingFilters)),
)
}
e.rejectExcessPending(excessFilters, frameNumber)
return
}
e.logger.Debug(
"no excess pending joins detected",
zap.Uint64("frame_number", frameNumber),
)
}
func (e *GlobalConsensusEngine) publishKeyRegistry() {

View File

@ -238,11 +238,159 @@ func (e *GlobalConsensusEngine) initializeGenesis() (
}
l1 := up2p.GetBloomFilterIndices(token.QUIL_TOKEN_ADDRESS, 256, 3)
err = e.hypergraph.AddVertex(txn, hgcrdt.NewVertex(
[32]byte(token.QUIL_TOKEN_ADDRESS),
[32]byte{0b00000000},
make([]byte, 64),
big.NewInt(100),
))
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.hypergraph.AddVertex(txn, hgcrdt.NewVertex(
[32]byte(token.QUIL_TOKEN_ADDRESS),
[32]byte{0b00000001},
make([]byte, 64),
big.NewInt(100),
))
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.hypergraph.AddVertex(txn, hgcrdt.NewVertex(
[32]byte(token.QUIL_TOKEN_ADDRESS),
[32]byte{0b00000010},
make([]byte, 64),
big.NewInt(100),
))
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.hypergraph.AddVertex(txn, hgcrdt.NewVertex(
[32]byte(token.QUIL_TOKEN_ADDRESS),
[32]byte{0b00000011},
make([]byte, 64),
big.NewInt(100),
))
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.hypergraph.AddVertex(txn, hgcrdt.NewVertex(
[32]byte(token.QUIL_TOKEN_ADDRESS),
[32]byte{0b00000100},
make([]byte, 64),
big.NewInt(100),
))
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.hypergraph.AddVertex(txn, hgcrdt.NewVertex(
[32]byte(token.QUIL_TOKEN_ADDRESS),
[32]byte{0b00000101},
make([]byte, 64),
big.NewInt(100),
))
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.shardsStore.PutAppShard(txn, store.ShardInfo{
L1: l1,
L2: token.QUIL_TOKEN_ADDRESS,
Path: []uint32{},
Path: []uint32{0},
})
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.shardsStore.PutAppShard(txn, store.ShardInfo{
L1: l1,
L2: token.QUIL_TOKEN_ADDRESS,
Path: []uint32{1},
})
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.shardsStore.PutAppShard(txn, store.ShardInfo{
L1: l1,
L2: token.QUIL_TOKEN_ADDRESS,
Path: []uint32{2},
})
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.shardsStore.PutAppShard(txn, store.ShardInfo{
L1: l1,
L2: token.QUIL_TOKEN_ADDRESS,
Path: []uint32{3},
})
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.shardsStore.PutAppShard(txn, store.ShardInfo{
L1: l1,
L2: token.QUIL_TOKEN_ADDRESS,
Path: []uint32{4},
})
if err != nil {
e.logger.Error(
"failed to place app shard",
zap.Error(err),
)
txn.Abort()
return nil, nil
}
err = e.shardsStore.PutAppShard(txn, store.ShardInfo{
L1: l1,
L2: token.QUIL_TOKEN_ADDRESS,
Path: []uint32{5},
})
if err != nil {
e.logger.Error(

View File

@ -14,6 +14,7 @@ import (
"slices"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
@ -126,15 +127,24 @@ type GlobalConsensusEngine struct {
*protobufs.GlobalFrame,
*protobufs.ProposalVote,
]
blsConstructor crypto.BlsConstructor
executionManager *manager.ExecutionEngineManager
mixnet typesconsensus.Mixnet
peerInfoManager tp2p.PeerInfoManager
workerManager worker.WorkerManager
proposer *provers.Manager
currentRank uint64
alertPublicKey []byte
hasSentKeyBundle bool
blsConstructor crypto.BlsConstructor
executionManager *manager.ExecutionEngineManager
mixnet typesconsensus.Mixnet
peerInfoManager tp2p.PeerInfoManager
workerManager worker.WorkerManager
proposer *provers.Manager
currentRank uint64
alertPublicKey []byte
hasSentKeyBundle bool
proverSyncInProgress atomic.Bool
lastJoinAttemptFrame atomic.Uint64
lastObservedFrame atomic.Uint64
lastRejectFrame atomic.Uint64
proverRootVerifiedFrame atomic.Uint64
proverRootSynced atomic.Bool
lastProposalFrameNumber atomic.Uint64
lastFrameMessageFrameNumber atomic.Uint64
// Message queues
globalConsensusMessageQueue chan *pb.Message
@ -159,6 +169,8 @@ type GlobalConsensusEngine struct {
blacklistMu sync.RWMutex
messageCollectors *keyedaggregator.SequencedCollectors[sequencedGlobalMessage]
messageAggregator *keyedaggregator.SequencedAggregator[sequencedGlobalMessage]
globalMessageSpillover map[uint64][][]byte
globalSpilloverMu sync.Mutex
currentDifficulty uint32
currentDifficultyMu sync.RWMutex
lastProvenFrameTime time.Time
@ -305,6 +317,7 @@ func NewGlobalConsensusEngine(
alertPublicKey: []byte{},
txLockMap: make(map[uint64]map[string]map[string]*LockedTransaction),
appShardCache: make(map[string]*appShardCacheEntry),
globalMessageSpillover: make(map[uint64][][]byte),
}
if err := engine.initGlobalMessageAggregator(); err != nil {
@ -722,6 +735,27 @@ func NewGlobalConsensusEngine(
if err != nil || len(as) == 0 {
engine.initializeGenesis()
}
engine.syncProvider = qsync.NewSyncProvider[
*protobufs.GlobalFrame,
*protobufs.GlobalProposal,
](
logger,
nil,
proverRegistry,
signerRegistry,
peerInfoManager,
qsync.NewGlobalSyncClient(
frameProver,
blsConstructor,
engine,
config,
),
hypergraph,
config,
nil,
engine.proverAddress,
)
}
componentBuilder.AddWorker(engine.peerInfoManager.Start)
@ -1124,6 +1158,32 @@ func (e *GlobalConsensusEngine) GetPeerInfo() *protobufs.PeerInfo {
// Observed addresses are what other peers have told us they see us as
ownAddrs := e.pubsub.GetOwnMultiaddrs()
archiveMode := e.config.Engine != nil && e.config.Engine.ArchiveMode
var lastReceivedFrame uint64
if archiveMode {
lastReceivedFrame = e.lastProposalFrameNumber.Load()
} else {
lastReceivedFrame = e.lastFrameMessageFrameNumber.Load()
}
var lastGlobalHeadFrame uint64
if archiveMode {
if e.clockStore != nil {
if frame, err := e.clockStore.GetLatestGlobalClockFrame(); err == nil &&
frame != nil &&
frame.Header != nil {
lastGlobalHeadFrame = frame.Header.FrameNumber
}
}
} else if e.globalTimeReel != nil {
if frame, err := e.globalTimeReel.GetHead(); err == nil &&
frame != nil &&
frame.Header != nil {
lastGlobalHeadFrame = frame.Header.FrameNumber
}
}
// Get supported capabilities from execution manager
capabilities := e.executionManager.GetSupportedCapabilities()
@ -1132,25 +1192,47 @@ func (e *GlobalConsensusEngine) GetPeerInfo() *protobufs.PeerInfo {
// master node process:
{
var pubsubAddrs, streamAddrs []string
if e.config.Engine.EnableMasterProxy {
pubsubAddrs = e.findObservedAddressesForProxy(
ownAddrs,
e.config.P2P.ListenMultiaddr,
e.config.P2P.ListenMultiaddr,
)
if e.config.P2P.StreamListenMultiaddr != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs, e.config.P2P.StreamListenMultiaddr,
if e.config.P2P.AnnounceListenMultiaddr != "" {
if e.config.P2P.AnnounceStreamListenMultiaddr == "" {
e.logger.Error(
"p2p announce address is configured while stream announce " +
"address is not, please fix",
)
}
_, err := ma.StringCast(e.config.P2P.AnnounceListenMultiaddr)
if err == nil {
pubsubAddrs = append(pubsubAddrs, e.config.P2P.AnnounceListenMultiaddr)
}
if e.config.P2P.AnnounceStreamListenMultiaddr != "" {
_, err = ma.StringCast(e.config.P2P.AnnounceStreamListenMultiaddr)
if err == nil {
streamAddrs = append(
streamAddrs,
e.config.P2P.AnnounceStreamListenMultiaddr,
)
}
}
} else {
pubsubAddrs = e.findObservedAddressesForConfig(
ownAddrs, e.config.P2P.ListenMultiaddr,
)
if e.config.P2P.StreamListenMultiaddr != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs, e.config.P2P.StreamListenMultiaddr,
if e.config.Engine.EnableMasterProxy {
pubsubAddrs = e.findObservedAddressesForProxy(
ownAddrs,
e.config.P2P.ListenMultiaddr,
e.config.P2P.ListenMultiaddr,
)
if e.config.P2P.StreamListenMultiaddr != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs, e.config.P2P.StreamListenMultiaddr,
)
}
} else {
pubsubAddrs = e.findObservedAddressesForConfig(
ownAddrs, e.config.P2P.ListenMultiaddr,
)
if e.config.P2P.StreamListenMultiaddr != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs, e.config.P2P.StreamListenMultiaddr,
)
}
}
}
reachability = append(reachability, &protobufs.Reachability{
@ -1162,34 +1244,33 @@ func (e *GlobalConsensusEngine) GetPeerInfo() *protobufs.PeerInfo {
// worker processes
{
announceP2P, announceStream, ok := e.workerAnnounceAddrs()
p2pPatterns, streamPatterns, filters := e.workerPatterns()
for i := range p2pPatterns {
if p2pPatterns[i] == "" {
continue
}
// find observed P2P addrs for this worker
// (prefer public > local/reserved)
pubsubAddrs := e.findObservedAddressesForConfig(ownAddrs, p2pPatterns[i])
var pubsubAddrs []string
if ok && i < len(announceP2P) && announceP2P[i] != "" {
pubsubAddrs = append(pubsubAddrs, announceP2P[i])
} else {
pubsubAddrs = e.findObservedAddressesForConfig(ownAddrs, p2pPatterns[i])
}
// stream pattern: explicit for this worker or synthesized from P2P IPs
var streamAddrs []string
if i < len(streamPatterns) && streamPatterns[i] != "" {
// Build using the declared worker stream patterns port/protocols.
// Reuse the pubsub IPs so P2P/stream align on the same interface.
if ok && i < len(announceStream) && announceStream[i] != "" {
streamAddrs = append(streamAddrs, announceStream[i])
} else if i < len(streamPatterns) && streamPatterns[i] != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs,
streamPatterns[i],
)
} else {
// No explicit worker stream pattern; if master stream is set, use its
// structure
if e.config.P2P.StreamListenMultiaddr != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs,
e.config.P2P.StreamListenMultiaddr,
)
}
} else if e.config.P2P.StreamListenMultiaddr != "" {
streamAddrs = e.buildStreamAddressesFromPubsub(
pubsubAddrs,
e.config.P2P.StreamListenMultiaddr,
)
}
var filter []byte
@ -1203,7 +1284,6 @@ func (e *GlobalConsensusEngine) GetPeerInfo() *protobufs.PeerInfo {
}
}
// Only append a worker entry if we have at least one P2P addr and filter
if len(pubsubAddrs) > 0 && len(filter) != 0 {
reachability = append(reachability, &protobufs.Reachability{
Filter: filter,
@ -1216,13 +1296,15 @@ func (e *GlobalConsensusEngine) GetPeerInfo() *protobufs.PeerInfo {
// Create our peer info
ourInfo := &protobufs.PeerInfo{
PeerId: e.pubsub.GetPeerID(),
Reachability: reachability,
Timestamp: time.Now().UnixMilli(),
Version: config.GetVersion(),
PatchNumber: []byte{config.GetPatchNumber()},
Capabilities: capabilities,
PublicKey: e.pubsub.GetPublicKey(),
PeerId: e.pubsub.GetPeerID(),
Reachability: reachability,
Timestamp: time.Now().UnixMilli(),
Version: config.GetVersion(),
PatchNumber: []byte{config.GetPatchNumber()},
Capabilities: capabilities,
PublicKey: e.pubsub.GetPublicKey(),
LastReceivedFrame: lastReceivedFrame,
LastGlobalHeadFrame: lastGlobalHeadFrame,
}
// Sign the peer info
@ -1236,6 +1318,26 @@ func (e *GlobalConsensusEngine) GetPeerInfo() *protobufs.PeerInfo {
return ourInfo
}
func (e *GlobalConsensusEngine) recordProposalFrameNumber(
frameNumber uint64,
) {
e.lastProposalFrameNumber.Store(frameNumber)
}
func (e *GlobalConsensusEngine) recordFrameMessageFrameNumber(
frameNumber uint64,
) {
for {
current := e.lastFrameMessageFrameNumber.Load()
if frameNumber <= current {
return
}
if e.lastFrameMessageFrameNumber.CompareAndSwap(current, frameNumber) {
return
}
}
}
func (e *GlobalConsensusEngine) GetWorkerManager() worker.WorkerManager {
return e.workerManager
}
@ -1316,11 +1418,91 @@ func (e *GlobalConsensusEngine) workerPatterns() (
return p2p, stream, filters
}
func (e *GlobalConsensusEngine) workerAnnounceAddrs() (
[]string,
[]string,
bool,
) {
ec := e.config.Engine
if ec == nil {
return nil, nil, false
}
count := ec.DataWorkerCount
if count <= 0 {
return nil, nil, false
}
if len(ec.DataWorkerAnnounceP2PMultiaddrs) == 0 &&
len(ec.DataWorkerAnnounceStreamMultiaddrs) == 0 {
return nil, nil, false
}
if len(ec.DataWorkerAnnounceP2PMultiaddrs) !=
len(ec.DataWorkerAnnounceStreamMultiaddrs) ||
len(ec.DataWorkerAnnounceP2PMultiaddrs) != count {
e.logger.Error(
"data worker announce multiaddr counts do not match",
zap.Int("announce_p2p", len(ec.DataWorkerAnnounceP2PMultiaddrs)),
zap.Int("announce_stream", len(ec.DataWorkerAnnounceStreamMultiaddrs)),
zap.Int("worker_count", count),
)
return nil, nil, false
}
p2p := make([]string, count)
stream := make([]string, count)
valid := true
for i := 0; i < count; i++ {
p := ec.DataWorkerAnnounceP2PMultiaddrs[i]
s := ec.DataWorkerAnnounceStreamMultiaddrs[i]
if p == "" || s == "" {
valid = false
break
}
if _, err := ma.StringCast(p); err != nil {
e.logger.Error(
"invalid worker announce p2p multiaddr",
zap.Int("index", i),
zap.Error(err),
)
valid = false
break
}
if _, err := ma.StringCast(s); err != nil {
e.logger.Error(
"invalid worker announce stream multiaddr",
zap.Int("index", i),
zap.Error(err),
)
valid = false
break
}
p2p[i] = p
stream[i] = s
}
if !valid {
return nil, nil, false
}
return p2p, stream, true
}
func (e *GlobalConsensusEngine) materialize(
txn store.Transaction,
frameNumber uint64,
requests []*protobufs.MessageBundle,
frame *protobufs.GlobalFrame,
) error {
frameNumber := frame.Header.FrameNumber
requests := frame.Requests
expectedProverRoot := frame.Header.ProverTreeCommitment
proposer := frame.Header.Prover
_, err := e.hypergraph.Commit(frameNumber)
if err != nil {
e.logger.Error("error committing hypergraph", zap.Error(err))
return errors.Wrap(err, "materialize")
}
var state state.State
state = hgstate.NewHypergraphState(e.hypergraph)
@ -1328,83 +1510,423 @@ func (e *GlobalConsensusEngine) materialize(
"materializing messages",
zap.Int("message_count", len(requests)),
)
worldSize := e.hypergraph.GetSize(nil, nil).Uint64()
e.currentDifficultyMu.RLock()
difficulty := uint64(e.currentDifficulty)
e.currentDifficultyMu.RUnlock()
eg := errgroup.Group{}
eg.SetLimit(len(requests))
for i, request := range requests {
requestBytes, err := request.ToCanonicalBytes()
eg.Go(func() error {
requestBytes, err := request.ToCanonicalBytes()
if err != nil {
e.logger.Error(
"error serializing request",
zap.Int("message_index", i),
zap.Error(err),
if err != nil {
e.logger.Error(
"error serializing request",
zap.Int("message_index", i),
zap.Error(err),
)
return errors.Wrap(err, "materialize")
}
if len(requestBytes) == 0 {
e.logger.Error(
"empty request bytes",
zap.Int("message_index", i),
)
return errors.Wrap(errors.New("empty request"), "materialize")
}
costBasis, err := e.executionManager.GetCost(requestBytes)
if err != nil {
e.logger.Error(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
)
return nil
}
var baseline *big.Int
if costBasis.Cmp(big.NewInt(0)) == 0 {
baseline = big.NewInt(0)
} else {
baseline = reward.GetBaselineFee(
difficulty,
worldSize,
costBasis.Uint64(),
8000000000,
)
baseline.Quo(baseline, costBasis)
}
_, err = e.executionManager.ProcessMessage(
frameNumber,
baseline,
bytes.Repeat([]byte{0xff}, 32),
requestBytes,
state,
)
return errors.Wrap(err, "materialize")
}
if err != nil {
e.logger.Error(
"error processing message",
zap.Int("message_index", i),
zap.Error(err),
)
return nil
}
if len(requestBytes) == 0 {
e.logger.Error(
"empty request bytes",
zap.Int("message_index", i),
)
return errors.Wrap(errors.New("empty request"), "materialize")
}
return nil
})
}
costBasis, err := e.executionManager.GetCost(requestBytes)
if err != nil {
e.logger.Error(
"invalid message",
zap.Int("message_index", i),
zap.Error(err),
)
continue
}
e.currentDifficultyMu.RLock()
difficulty := uint64(e.currentDifficulty)
e.currentDifficultyMu.RUnlock()
var baseline *big.Int
if costBasis.Cmp(big.NewInt(0)) == 0 {
baseline = big.NewInt(0)
} else {
baseline = reward.GetBaselineFee(
difficulty,
e.hypergraph.GetSize(nil, nil).Uint64(),
costBasis.Uint64(),
8000000000,
)
baseline.Quo(baseline, costBasis)
}
result, err := e.executionManager.ProcessMessage(
frameNumber,
baseline,
bytes.Repeat([]byte{0xff}, 32),
requestBytes,
state,
)
if err != nil {
e.logger.Error(
"error processing message",
zap.Int("message_index", i),
zap.Error(err),
)
continue
}
state = result.State
if err := eg.Wait(); err != nil {
return err
}
if err := state.Commit(); err != nil {
return errors.Wrap(err, "materialize")
}
err := e.proverRegistry.ProcessStateTransition(state, frameNumber)
err = e.proverRegistry.ProcessStateTransition(state, frameNumber)
if err != nil {
return errors.Wrap(err, "materialize")
}
if e.verifyProverRoot(frameNumber, expectedProverRoot, proposer) {
e.reconcileLocalWorkerAllocations()
}
return nil
}
func (e *GlobalConsensusEngine) verifyProverRoot(
frameNumber uint64,
expected []byte,
proposer []byte,
) bool {
if len(expected) == 0 || e.hypergraph == nil {
return true
}
roots, err := e.hypergraph.GetShardCommits(
frameNumber,
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
)
if err != nil || len(roots) == 0 || len(roots[0]) == 0 {
if err != nil {
e.logger.Warn(
"failed to load local prover root",
zap.Uint64("frame_number", frameNumber),
zap.Error(err),
)
} else {
e.logger.Warn(
"local prover root missing",
zap.Uint64("frame_number", frameNumber),
)
}
return false
}
localRoot := roots[0]
if !bytes.Equal(localRoot, expected) {
e.logger.Debug(
"prover root mismatch",
zap.Uint64("frame_number", frameNumber),
zap.String("expected_root", hex.EncodeToString(expected)),
zap.String("local_root", hex.EncodeToString(localRoot)),
)
e.proverRootSynced.Store(false)
e.proverRootVerifiedFrame.Store(0)
e.triggerProverHypersync(proposer)
return false
}
e.proverRootSynced.Store(true)
e.proverRootVerifiedFrame.Store(frameNumber)
return true
}
func (e *GlobalConsensusEngine) triggerProverHypersync(proposer []byte) {
if e.syncProvider == nil || len(proposer) == 0 {
e.logger.Debug("no sync provider or proposer")
return
}
if bytes.Equal(proposer, e.getProverAddress()) {
e.logger.Debug("we are the proposer")
return
}
if !e.proverSyncInProgress.CompareAndSwap(false, true) {
e.logger.Debug("already syncing")
return
}
go func() {
defer e.proverSyncInProgress.Store(false)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
shardKey := tries.ShardKey{
L1: [3]byte{0x00, 0x00, 0x00},
L2: intrinsics.GLOBAL_INTRINSIC_ADDRESS,
}
e.syncProvider.HyperSync(ctx, proposer, shardKey)
if err := e.proverRegistry.Refresh(); err != nil {
e.logger.Warn(
"failed to refresh prover registry after hypersync",
zap.Error(err),
)
}
}()
}
func (e *GlobalConsensusEngine) reconcileLocalWorkerAllocations() {
if e.workerManager == nil || e.proverRegistry == nil {
return
}
workers, err := e.workerManager.RangeWorkers()
if err != nil || len(workers) == 0 {
if err != nil {
e.logger.Warn(
"failed to range workers for reconciliation",
zap.Error(err),
)
}
return
}
info, err := e.proverRegistry.GetProverInfo(e.getProverAddress())
if err != nil || info == nil {
if err != nil {
e.logger.Warn(
"failed to load prover info for reconciliation",
zap.Error(err),
)
}
return
}
statusByFilter := make(
map[string]typesconsensus.ProverStatus,
len(info.Allocations),
)
for _, alloc := range info.Allocations {
if len(alloc.ConfirmationFilter) == 0 {
continue
}
statusByFilter[hex.EncodeToString(alloc.ConfirmationFilter)] = alloc.Status
}
for _, worker := range workers {
if len(worker.Filter) == 0 {
continue
}
key := hex.EncodeToString(worker.Filter)
status, ok := statusByFilter[key]
if !ok {
if worker.Allocated {
if err := e.workerManager.DeallocateWorker(worker.CoreId); err != nil {
e.logger.Warn(
"failed to deallocate worker for missing allocation",
zap.Uint("core_id", worker.CoreId),
zap.Error(err),
)
}
}
continue
}
switch status {
case typesconsensus.ProverStatusActive:
if !worker.Allocated {
if err := e.workerManager.AllocateWorker(
worker.CoreId,
worker.Filter,
); err != nil {
e.logger.Warn(
"failed to allocate worker after confirmation",
zap.Uint("core_id", worker.CoreId),
zap.Error(err),
)
}
}
case typesconsensus.ProverStatusLeaving,
typesconsensus.ProverStatusRejected,
typesconsensus.ProverStatusKicked:
if worker.Allocated {
if err := e.workerManager.DeallocateWorker(worker.CoreId); err != nil {
e.logger.Warn(
"failed to deallocate worker after status change",
zap.Uint("core_id", worker.CoreId),
zap.Error(err),
)
}
}
}
}
}
func (e *GlobalConsensusEngine) joinProposalReady(
frameNumber uint64,
) (bool, string) {
if e.lastObservedFrame.Load() == 0 {
e.logger.Debug("join proposal blocked: no observed frame")
return false, "awaiting initial frame"
}
if !e.proverRootSynced.Load() {
e.logger.Debug("join proposal blocked: prover root not synced")
return false, "awaiting prover root sync"
}
verified := e.proverRootVerifiedFrame.Load()
if verified == 0 || verified < frameNumber {
e.logger.Debug(
"join proposal blocked: frame not verified",
zap.Uint64("verified_frame", verified),
zap.Uint64("current_frame", frameNumber),
)
return false, "latest frame not yet verified"
}
lastAttempt := e.lastJoinAttemptFrame.Load()
if lastAttempt != 0 {
if frameNumber <= lastAttempt {
e.logger.Debug(
"join proposal blocked: waiting for newer frame",
zap.Uint64("last_attempt", lastAttempt),
zap.Uint64("current_frame", frameNumber),
)
return false, "waiting for newer frame"
}
if frameNumber-lastAttempt < 4 {
e.logger.Debug(
"join proposal blocked: cooling down between attempts",
zap.Uint64("last_attempt", lastAttempt),
zap.Uint64("current_frame", frameNumber),
)
return false, "cooldown between join attempts"
}
}
return true, ""
}
func (e *GlobalConsensusEngine) selectExcessPendingFilters(
self *typesconsensus.ProverInfo,
) [][]byte {
if self == nil || e.config == nil || e.config.Engine == nil {
e.logger.Debug("excess pending evaluation skipped: missing config or prover info")
return nil
}
capacity := e.config.Engine.DataWorkerCount
if capacity <= 0 {
return nil
}
active := 0
pending := make([][]byte, 0, len(self.Allocations))
for _, allocation := range self.Allocations {
if len(allocation.ConfirmationFilter) == 0 {
continue
}
switch allocation.Status {
case typesconsensus.ProverStatusActive:
active++
case typesconsensus.ProverStatusJoining:
filterCopy := make([]byte, len(allocation.ConfirmationFilter))
copy(filterCopy, allocation.ConfirmationFilter)
pending = append(pending, filterCopy)
}
}
allowedPending := capacity - active
if allowedPending < 0 {
allowedPending = 0
}
if len(pending) <= allowedPending {
e.logger.Debug(
"pending joins within limit",
zap.Int("active_allocations", active),
zap.Int("pending_allocations", len(pending)),
zap.Int("capacity", capacity),
)
return nil
}
excess := len(pending) - allowedPending
e.logger.Debug(
"pending joins exceed limit",
zap.Int("active_allocations", active),
zap.Int("pending_allocations", len(pending)),
zap.Int("capacity", capacity),
zap.Int("excess", excess),
)
rand.Shuffle(len(pending), func(i, j int) {
pending[i], pending[j] = pending[j], pending[i]
})
return pending[:excess]
}
func (e *GlobalConsensusEngine) rejectExcessPending(
filters [][]byte,
frameNumber uint64,
) {
if e.workerManager == nil || len(filters) == 0 {
return
}
last := e.lastRejectFrame.Load()
if last != 0 {
if frameNumber <= last {
e.logger.Debug(
"forced rejection skipped: awaiting newer frame",
zap.Uint64("last_reject_frame", last),
zap.Uint64("current_frame", frameNumber),
)
return
}
if frameNumber-last < 4 {
e.logger.Debug(
"deferring forced join rejections",
zap.Uint64("frame_number", frameNumber),
zap.Uint64("last_reject_frame", last),
)
return
}
}
limit := len(filters)
if limit > 100 {
limit = 100
}
rejects := make([][]byte, limit)
for i := 0; i < limit; i++ {
rejects[i] = filters[i]
}
if err := e.workerManager.DecideAllocations(rejects, nil); err != nil {
e.logger.Warn("failed to reject excess joins", zap.Error(err))
return
}
e.lastRejectFrame.Store(frameNumber)
e.logger.Info(
"submitted forced join rejections",
zap.Int("rejections", len(rejects)),
zap.Uint64("frame_number", frameNumber),
)
}
func (e *GlobalConsensusEngine) revert(
txn store.Transaction,
frameNumber uint64,
@ -1924,13 +2446,15 @@ func (e *GlobalConsensusEngine) signPeerInfo(
) ([]byte, error) {
// Create a copy of the peer info without the signature for signing
infoCopy := &protobufs.PeerInfo{
PeerId: info.PeerId,
Reachability: info.Reachability,
Timestamp: info.Timestamp,
Version: info.Version,
PatchNumber: info.PatchNumber,
Capabilities: info.Capabilities,
PublicKey: info.PublicKey,
PeerId: info.PeerId,
Reachability: info.Reachability,
Timestamp: info.Timestamp,
Version: info.Version,
PatchNumber: info.PatchNumber,
Capabilities: info.Capabilities,
PublicKey: info.PublicKey,
LastReceivedFrame: info.LastReceivedFrame,
LastGlobalHeadFrame: info.LastGlobalHeadFrame,
// Exclude Signature field
}
@ -2138,8 +2662,12 @@ func (e *GlobalConsensusEngine) runNodeHealthCheck() {
"latest frame is older than 60 seconds; node may still be synchronizing",
append(
baseFields,
zap.Uint64(
"latest_frame_received",
e.lastFrameMessageFrameNumber.Load(),
),
zap.Uint64("head_frame_number", headFrame.Header.FrameNumber),
zap.Time("head_frame_time", headTime),
zap.String("head_frame_time", headTime.String()),
)...,
)
return
@ -2158,8 +2686,12 @@ func (e *GlobalConsensusEngine) runNodeHealthCheck() {
"node health check passed",
append(
baseFields,
zap.Uint64(
"latest_frame_received",
e.lastFrameMessageFrameNumber.Load(),
),
zap.Uint64("head_frame_number", headFrame.Header.FrameNumber),
zap.Time("head_frame_time", headTime),
zap.String("head_frame_time", headTime.String()),
zap.String("unminted_reward_quil", readable),
zap.String("unminted_reward_raw_units", units.String()),
)...,
@ -2235,13 +2767,15 @@ func (e *GlobalConsensusEngine) validatePeerInfoSignature(
// Create a copy of the peer info without the signature for validation
infoCopy := &protobufs.PeerInfo{
PeerId: peerInfo.PeerId,
Reachability: peerInfo.Reachability,
Timestamp: peerInfo.Timestamp,
Version: peerInfo.Version,
PatchNumber: peerInfo.PatchNumber,
Capabilities: peerInfo.Capabilities,
PublicKey: peerInfo.PublicKey,
PeerId: peerInfo.PeerId,
Reachability: peerInfo.Reachability,
Timestamp: peerInfo.Timestamp,
Version: peerInfo.Version,
PatchNumber: peerInfo.PatchNumber,
Capabilities: peerInfo.Capabilities,
PublicKey: peerInfo.PublicKey,
LastReceivedFrame: peerInfo.LastReceivedFrame,
LastGlobalHeadFrame: peerInfo.LastGlobalHeadFrame,
// Exclude Signature field
}
@ -2720,10 +3254,7 @@ func (e *GlobalConsensusEngine) startConsensus(
e.voteCollectorDistributor, // voteCollectorDistributor
e.timeoutCollectorDistributor, // timeoutCollectorDistributor
e.forks, // forks
validator.NewValidator[
*protobufs.GlobalFrame,
*protobufs.ProposalVote,
](e, e), // validator
validator.NewValidator[*protobufs.GlobalFrame](e, e), // validator
e.voteAggregator, // voteAggregator
e.timeoutAggregator, // timeoutAggregator
e, // finalizer
@ -3147,8 +3678,7 @@ func (e *GlobalConsensusEngine) OnQuorumCertificateTriggeredRankChange(
if err := e.materialize(
txn,
frame.Header.FrameNumber,
frame.Requests,
frame,
); err != nil {
_ = txn.Abort()
e.logger.Error("could not materialize frame requests", zap.Error(err))

View File

@ -78,6 +78,11 @@ type mockIntegrationPubSub struct {
underlyingBlossomSub *qp2p.BlossomSub
}
// Close implements p2p.PubSub.
func (m *mockIntegrationPubSub) Close() error {
panic("unimplemented")
}
// GetOwnMultiaddrs implements p2p.PubSub.
func (m *mockIntegrationPubSub) GetOwnMultiaddrs() []multiaddr.Multiaddr {
ma, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/8336")

View File

@ -7,6 +7,7 @@ import (
"fmt"
"slices"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/consensus/models"
@ -144,6 +145,7 @@ func (p *globalMessageProcessor) enforceCollectorLimit(
if len(collector.Records()) >= maxGlobalMessagesPerFrame {
collector.Remove(record)
p.engine.deferGlobalMessage(record.sequence+1, record.payload)
return keyedcollector.NewInvalidRecordError(
record,
fmt.Errorf("message limit reached for frame %d", p.sequence),
@ -166,13 +168,13 @@ func (e *GlobalConsensusEngine) initGlobalMessageAggregator() error {
return fmt.Errorf("global message collector factory: %w", err)
}
e.messageCollectors = keyedaggregator.NewSequencedCollectors[sequencedGlobalMessage](
e.messageCollectors = keyedaggregator.NewSequencedCollectors(
tracer,
0,
collectorFactory,
)
aggregator, err := keyedaggregator.NewSequencedAggregator[sequencedGlobalMessage](
aggregator, err := keyedaggregator.NewSequencedAggregator(
tracer,
0,
e.messageCollectors,
@ -228,3 +230,60 @@ func (e *GlobalConsensusEngine) getMessageCollector(
}
return e.messageCollectors.GetCollector(rank)
}
func (e *GlobalConsensusEngine) deferGlobalMessage(
targetRank uint64,
payload []byte,
) {
if e == nil || len(payload) == 0 || targetRank == 0 {
return
}
cloned := slices.Clone(payload)
e.globalSpilloverMu.Lock()
e.globalMessageSpillover[targetRank] = append(
e.globalMessageSpillover[targetRank],
cloned,
)
pending := len(e.globalMessageSpillover[targetRank])
e.globalSpilloverMu.Unlock()
if e.logger != nil {
e.logger.Debug(
"deferred global message due to collector limit",
zap.Uint64("target_rank", targetRank),
zap.Int("pending", pending),
)
}
}
func (e *GlobalConsensusEngine) flushDeferredGlobalMessages(targetRank uint64) {
if e == nil || e.messageAggregator == nil || targetRank == 0 {
return
}
e.globalSpilloverMu.Lock()
payloads := e.globalMessageSpillover[targetRank]
if len(payloads) > 0 {
delete(e.globalMessageSpillover, targetRank)
}
e.globalSpilloverMu.Unlock()
if len(payloads) == 0 {
return
}
for _, payload := range payloads {
e.messageAggregator.Add(
newSequencedGlobalMessage(targetRank, payload),
)
}
if e.logger != nil {
e.logger.Debug(
"replayed deferred global messages",
zap.Uint64("target_rank", targetRank),
zap.Int("count", len(payloads)),
)
}
}

View File

@ -8,6 +8,7 @@ import (
"encoding/hex"
"fmt"
"slices"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
pcrypto "github.com/libp2p/go-libp2p/core/crypto"
@ -262,6 +263,10 @@ func (e *GlobalConsensusEngine) handleFrameMessage(
return
}
if frame.Header != nil {
e.recordFrameMessageFrameNumber(frame.Header.FrameNumber)
}
frameIDBI, _ := poseidon.HashBytes(frame.Header.Output)
frameID := frameIDBI.FillBytes(make([]byte, 32))
e.frameStoreMu.Lock()
@ -336,7 +341,7 @@ func (e *GlobalConsensusEngine) handleAppFrameMessage(message *pb.Message) {
bundle := &protobufs.MessageBundle{
Requests: []*protobufs.MessageRequest{
&protobufs.MessageRequest{
{
Request: &protobufs.MessageRequest_Shard{
Shard: frame.Header,
},
@ -386,6 +391,8 @@ func (e *GlobalConsensusEngine) handlePeerInfoMessage(message *pb.Message) {
}
if e.isDuplicatePeerInfo(peerInfo) {
e.peerInfoManager.GetPeerInfo(peerInfo.PeerId).LastSeen =
time.Now().UnixMilli()
return
}
@ -1344,11 +1351,7 @@ func (e *GlobalConsensusEngine) addCertifiedState(
return
}
if err := e.materialize(
txn,
parent.State.Header.FrameNumber,
parent.State.Requests,
); err != nil {
if err := e.materialize(txn, parent.State); err != nil {
_ = txn.Abort()
e.logger.Error("could not materialize frame requests", zap.Error(err))
return
@ -1396,6 +1399,10 @@ func (e *GlobalConsensusEngine) handleProposal(message *pb.Message) {
return
}
if proposal.State != nil && proposal.State.Header != nil {
e.recordProposalFrameNumber(proposal.State.Header.FrameNumber)
}
frameIDBI, _ := poseidon.HashBytes(proposal.State.Header.Output)
frameID := frameIDBI.FillBytes(make([]byte, 32))
e.frameStoreMu.Lock()
@ -1886,24 +1893,3 @@ func (e *GlobalConsensusEngine) peekMessageType(message *pb.Message) uint32 {
// Read type prefix from first 4 bytes
return binary.BigEndian.Uint32(message.Data[:4])
}
func compareBits(b1, b2 []byte) int {
bitCount1 := 0
bitCount2 := 0
for i := 0; i < len(b1); i++ {
for bit := 0; bit < 8; bit++ {
if b1[i]&(1<<bit) != 0 {
bitCount1++
}
}
}
for i := 0; i < len(b2); i++ {
for bit := 0; bit < 8; bit++ {
if b2[i]&(1<<bit) != 0 {
bitCount2++
}
}
}
return bitCount1 - bitCount2
}

View File

@ -288,7 +288,7 @@ func (e *GlobalConsensusEngine) validateProverMessage(
) tp2p.ValidationResult {
e.logger.Debug(
"validating prover message from peer",
zap.String("peer_id", peerID.String()),
zap.String("peer_id", peer.ID(message.From).String()),
)
// Check if data is long enough to contain type prefix
if len(message.Data) < 4 {
@ -307,7 +307,7 @@ func (e *GlobalConsensusEngine) validateProverMessage(
case protobufs.MessageBundleType:
e.logger.Debug(
"validating message bundle from peer",
zap.String("peer_id", peerID.String()),
zap.String("peer_id", peer.ID(message.From).String()),
)
// Prover messages come wrapped in MessageBundle
messageBundle := &protobufs.MessageBundle{}

View File

@ -87,7 +87,7 @@ func NewManager(
}
// PlanAndAllocate picks up to maxAllocations of the best shard filters and
// calls WorkerManager.AllocateWorker for each selected free worker.
// updates the filter in the worker manager for each selected free worker.
// If maxAllocations == 0, it will use as many free workers as available.
func (m *Manager) PlanAndAllocate(
difficulty uint64,
@ -123,7 +123,7 @@ func (m *Manager) PlanAndAllocate(
}
free := make([]uint, 0, len(all))
for _, w := range all {
if !w.Allocated {
if len(w.Filter) == 0 {
free = append(free, w.CoreId)
}
}
@ -231,6 +231,15 @@ func (m *Manager) PlanAndAllocate(
})
}
workerLookup := make(map[uint]*store.WorkerInfo, len(all))
for _, w := range all {
workerLookup[w.CoreId] = w
}
if len(proposals) > 0 {
m.persistPlannedFilters(proposals, workerLookup)
}
// Perform allocations
workerIds := []uint{}
filters := [][]byte{}
@ -251,6 +260,45 @@ func (m *Manager) PlanAndAllocate(
return proposals, errors.Wrap(err, "plan and allocate")
}
func (m *Manager) persistPlannedFilters(
proposals []Proposal,
workers map[uint]*store.WorkerInfo,
) {
for _, proposal := range proposals {
info, ok := workers[proposal.WorkerId]
if !ok {
var err error
info, err = m.store.GetWorker(proposal.WorkerId)
if err != nil {
m.logger.Warn(
"failed to load worker for planned allocation",
zap.Uint("core_id", proposal.WorkerId),
zap.Error(err),
)
continue
}
workers[proposal.WorkerId] = info
}
if bytes.Equal(info.Filter, proposal.Filter) {
continue
}
filterCopy := make([]byte, len(proposal.Filter))
copy(filterCopy, proposal.Filter)
info.Filter = filterCopy
info.Allocated = false
if err := m.workerMgr.RegisterWorker(info); err != nil {
m.logger.Warn(
"failed to persist worker filter",
zap.Uint("core_id", info.CoreId),
zap.Error(err),
)
}
}
}
func (m *Manager) scoreShards(
shards []ShardDescriptor,
basis *big.Int,
@ -296,6 +344,9 @@ func (m *Manager) scoreShards(
if shardsSqrt.IsZero() {
return nil, errors.New("score shards")
}
if ringDiv.IsZero() {
return nil, errors.New("score shards")
}
factor = factor.Div(ringDiv)
factor = factor.Div(shardsSqrt)
@ -322,6 +373,11 @@ func (m *Manager) DecideJoins(
return nil
}
availableWorkers, err := m.unallocatedWorkerCount()
if err != nil {
return errors.Wrap(err, "decide joins")
}
// If no shards remain, we should warn
if len(shards) == 0 {
m.logger.Warn("no shards available to decide")
@ -396,5 +452,38 @@ func (m *Manager) DecideJoins(
}
}
if availableWorkers == 0 && len(confirm) > 0 {
m.logger.Info(
"skipping confirmations due to lack of available workers",
zap.Int("pending_confirmations", len(confirm)),
)
confirm = nil
} else if availableWorkers > 0 && len(confirm) > availableWorkers {
m.logger.Warn(
"limiting confirmations due to worker capacity",
zap.Int("pending_confirmations", len(confirm)),
zap.Int("available_workers", availableWorkers),
)
confirm = confirm[:availableWorkers]
}
return m.workerMgr.DecideAllocations(reject, confirm)
}
func (m *Manager) unallocatedWorkerCount() (int, error) {
workers, err := m.workerMgr.RangeWorkers()
if err != nil {
return 0, err
}
count := 0
for _, worker := range workers {
if worker == nil {
continue
}
if !worker.Allocated {
count++
}
}
return count, nil
}

View File

@ -21,6 +21,11 @@ type mockWorkerManager struct {
confirmed [][]byte
}
// CheckWorkersConnected implements worker.WorkerManager.
func (m *mockWorkerManager) CheckWorkersConnected() ([]uint, error) {
panic("unimplemented")
}
func (m *mockWorkerManager) DecideAllocations(reject [][]byte, confirm [][]byte) error {
m.rejected = reject
m.confirmed = confirm
@ -44,7 +49,14 @@ func (m *mockWorkerManager) GetWorkerIdByFilter(filter []byte) (uint, error) {
}
func (m *mockWorkerManager) RegisterWorker(info *store.WorkerInfo) error {
panic("unimplemented")
for i, worker := range m.workers {
if worker.CoreId == info.CoreId {
m.workers[i] = info
return nil
}
}
m.workers = append(m.workers, info)
return nil
}
func (m *mockWorkerManager) Start(ctx context.Context) error {
@ -128,6 +140,11 @@ func TestPlanAndAllocate_EqualScores_RandomizedWhenNotDataGreedy(t *testing.T) {
t.Fatalf("expected one allocation, got %d", len(wm.lastFiltersHex))
}
firstPickCounts[wm.lastFiltersHex[0]]++
// Reset worker filter to simulate completion
for _, worker := range wm.workers {
worker.Filter = nil
}
}
distinct := 0

View File

@ -112,12 +112,6 @@ func (r *ProverRegistry) ProcessStateTransition(
change.Domain,
intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
) {
r.logger.Debug(
"processing prover change",
zap.String("address", fmt.Sprintf("%x", change.Address)),
zap.Uint8("change_type", uint8(change.StateChange)),
)
if err := r.processProverChange(change, frameNumber); err != nil {
r.logger.Debug(
"failed to process prover change",
@ -680,12 +674,6 @@ func (r *ProverRegistry) extractGlobalState() error {
mappedStatus = consensus.ProverStatusUnknown
}
r.logger.Debug(
"processing prover vertex",
zap.String("address", fmt.Sprintf("%x", proverAddress)),
zap.Uint8("status", uint8(mappedStatus)),
)
// Extract available storage
var availableStorage uint64
storageBytes, err := r.rdfMultiprover.Get(
@ -1000,13 +988,6 @@ func (r *ProverRegistry) extractGlobalState() error {
}
}
r.logger.Debug(
"processing allocation vertex",
zap.String("prover_ref", fmt.Sprintf("%x", proverRef)),
zap.String("filter", fmt.Sprintf("%x", confirmationFilter)),
zap.Uint8("status", uint8(mappedStatus)),
)
// If allocation is active and we can identify them, add to
// filter-specific trie
if mappedStatus == consensus.ProverStatusActive &&
@ -1128,7 +1109,6 @@ func (r *ProverRegistry) processProverChange(
// Check if this is a Prover or ProverAllocation
switch t {
case "prover:Prover":
r.logger.Debug("processing prover change")
publicKey, err := r.rdfMultiprover.Get(
global.GLOBAL_RDF_SCHEMA,
"prover:Prover",
@ -1151,7 +1131,6 @@ func (r *ProverRegistry) processProverChange(
return nil // Skip if no status
}
status := statusBytes[0]
r.logger.Debug("status of prover change", zap.Int("status", int(status)))
// Map internal status to our ProverStatus enum
var mappedStatus consensus.ProverStatus
@ -1238,7 +1217,6 @@ func (r *ProverRegistry) processProverChange(
proverInfo.KickFrameNumber = kickFrameNumber
}
case "allocation:ProverAllocation":
r.logger.Debug("processing prover allocation change")
proverRef, err := r.rdfMultiprover.Get(
global.GLOBAL_RDF_SCHEMA,
"allocation:ProverAllocation",
@ -1282,12 +1260,6 @@ func (r *ProverRegistry) processProverChange(
mappedStatus = consensus.ProverStatusUnknown
}
r.logger.Debug(
"processing allocation update",
zap.String("prover_ref", fmt.Sprintf("%x", proverRef)),
zap.Uint8("status", uint8(mappedStatus)),
)
// Extract data
confirmationFilter, err := r.rdfMultiprover.Get(
global.GLOBAL_RDF_SCHEMA,
@ -1727,12 +1699,6 @@ func (r *ProverRegistry) GetAllActiveAppShardProvers() (
// Check if this prover has any active allocations (app shard provers)
hasActiveAllocation := false
for _, allocation := range proverInfo.Allocations {
r.logger.Debug(
"checking allocation status",
zap.String("address", hex.EncodeToString(proverInfo.Address)),
zap.String("filter", hex.EncodeToString(allocation.ConfirmationFilter)),
zap.Uint32("status", uint32(allocation.Status)),
)
if allocation.Status == consensus.ProverStatusActive &&
len(allocation.ConfirmationFilter) > 0 {
hasActiveAllocation = true

View File

@ -24,7 +24,7 @@ const (
// Default cache size for LRU
defaultGlobalCacheSize = 10000
// Maximum tree depth to prevent unbounded growth
maxGlobalTreeDepth = 360
maxGlobalTreeDepth = 10
)
// TimeReelEventType represents different types of events that can occur in a
@ -99,8 +99,7 @@ type GlobalTimeReel struct {
// Materialize side effects
materializeFunc func(
txn store.Transaction,
frameNumber uint64,
requests []*protobufs.MessageBundle,
frame *protobufs.GlobalFrame,
) error
// Revert side effects
@ -155,8 +154,7 @@ func NewGlobalTimeReel(
equivocators: make(map[uint64]map[int]bool),
materializeFunc: func(
txn store.Transaction,
frameNumber uint64,
requests []*protobufs.MessageBundle,
frame *protobufs.GlobalFrame,
) error {
return nil
},
@ -176,8 +174,7 @@ func NewGlobalTimeReel(
func (g *GlobalTimeReel) SetMaterializeFunc(
materializeFunc func(
txn store.Transaction,
frameNumber uint64,
requests []*protobufs.MessageBundle,
frame *protobufs.GlobalFrame,
) error,
) {
g.materializeFunc = materializeFunc
@ -1567,7 +1564,7 @@ func (g *GlobalTimeReel) bootstrapFromStore() error {
var start uint64
if !g.archiveMode && latestNum+1 > maxGlobalTreeDepth {
// Non-archive mode: only load last 360 frames
// Non-archive mode: only load last 10 frames
start = latestNum - (maxGlobalTreeDepth - 1)
} else {
// Archive mode or insufficient frames: load all available
@ -1656,7 +1653,7 @@ func (g *GlobalTimeReel) bootstrapFromStore() error {
if !g.archiveMode && g.root != nil {
g.logger.Info(
"non-archive mode: accepting last 360 frames as valid chain",
"non-archive mode: accepting last 10 frames as valid chain",
zap.Uint64("pseudo_root_frame", g.root.Frame.Header.FrameNumber),
zap.Uint64("head_frame", g.head.Frame.Header.FrameNumber),
)
@ -1682,11 +1679,7 @@ func (g *GlobalTimeReel) persistCanonicalFrames(
}
for _, f := range frames {
if err := g.materializeFunc(
txn,
f.Header.FrameNumber,
f.Requests,
); err != nil {
if err := g.materializeFunc(txn, f); err != nil {
_ = txn.Abort()
return errors.Wrap(err, "persist canonical frames")
}

View File

@ -106,10 +106,6 @@ func (b *BLSAppFrameValidator) Validate(
for i, prover := range provers {
if slices.Contains(bits, uint8(i)) {
info := prover
if err != nil {
b.logger.Error("could not get prover info", zap.Error(err))
return false, errors.Wrap(err, "validate")
}
activeProverSet = append(activeProverSet, info.PublicKey)
throwawaySet = append(throwawaySet, throwaway.Public().([]byte))
continue

View File

@ -102,10 +102,6 @@ func (b *BLSGlobalFrameValidator) Validate(
for i, prover := range provers {
if slices.Contains(bits, uint8(i)) {
info := prover
if err != nil {
b.logger.Error("could not get prover info", zap.Error(err))
return false, errors.Wrap(err, "validate")
}
activeProverSet = append(activeProverSet, info.PublicKey)
throwawaySet = append(
throwawaySet,

View File

@ -104,10 +104,10 @@ func NewAppShardTimeoutAggregator[PeerIDT models.Unique](
currentRank uint64,
) (consensus.TimeoutAggregator[*protobufs.ProposalVote], error) {
// initialize the Validator
validator := validator.NewValidator[
*protobufs.AppShardFrame,
*protobufs.ProposalVote,
](committee, consensusVerifier)
validator := validator.NewValidator[*protobufs.AppShardFrame](
committee,
consensusVerifier,
)
timeoutProcessorFactory := timeoutcollector.NewTimeoutProcessorFactory[
*protobufs.AppShardFrame,

View File

@ -593,9 +593,11 @@ func (e *ComputeExecutionEngine) handleDeploy(
frameNumber uint64,
feePaid *big.Int,
state state.State,
) (*execution.ProcessMessageResult, error) {
) (*execution.ProcessMessageResult, []byte, error) {
var deployAddress []byte
if len(payload) < 4 {
return nil, errors.Wrap(errors.New("invalid payload"), "handle deploy")
return nil, nil, errors.Wrap(errors.New("invalid payload"), "handle deploy")
}
deployType := binary.BigEndian.Uint32(payload[:4])
@ -604,7 +606,7 @@ func (e *ComputeExecutionEngine) handleDeploy(
args := protobufs.ComputeDeploy{}
err := args.FromCanonicalBytes(payload)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Create configuration from deploy arguments
@ -626,11 +628,11 @@ func (e *ComputeExecutionEngine) handleDeploy(
e.compiler,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Deploy the intrinsic
state, err = intrinsic.Deploy(
state, deployAddress, err = intrinsic.Deploy(
compute.COMPUTE_INTRINSIC_DOMAIN,
nil,
nil,
@ -640,7 +642,7 @@ func (e *ComputeExecutionEngine) handleDeploy(
state,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
e.logger.Info(
@ -652,7 +654,7 @@ func (e *ComputeExecutionEngine) handleDeploy(
updatePb := &protobufs.ComputeUpdate{}
err := updatePb.FromCanonicalBytes(payload)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Load existing compute intrinsic
@ -668,13 +670,13 @@ func (e *ComputeExecutionEngine) handleDeploy(
e.compiler,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Deploy (update) the intrinsic
var domain [32]byte
copy(domain[:], address)
state, err = intrinsic.Deploy(
state, deployAddress, err = intrinsic.Deploy(
domain,
nil, // provers
nil, // creator
@ -684,7 +686,7 @@ func (e *ComputeExecutionEngine) handleDeploy(
state,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
e.logger.Info(
@ -692,24 +694,21 @@ func (e *ComputeExecutionEngine) handleDeploy(
zap.String("address", hex.EncodeToString(intrinsic.Address())),
)
} else {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("invalid deployment type"),
"handle deploy",
)
}
// Get the deployed address
deployedAddress := intrinsic.Address()
// Store the intrinsic
e.intrinsicsMutex.Lock()
e.intrinsics[string(deployedAddress)] = intrinsic
e.intrinsics[string(deployAddress)] = intrinsic
e.intrinsicsMutex.Unlock()
return &execution.ProcessMessageResult{
Messages: []*protobufs.Message{},
State: state,
}, nil
}, deployAddress, nil
}
func (e *ComputeExecutionEngine) handleBundle(
@ -765,7 +764,6 @@ func (e *ComputeExecutionEngine) handleBundle(
continue
}
changesetLen := len(state.Changeset())
feeForOp := big.NewInt(0)
if fees.NeedsOneFee(op, DefaultFeeMarket) {
// Pre-checked; defensive guard helpful for future policy changes
@ -781,34 +779,25 @@ func (e *ComputeExecutionEngine) handleBundle(
// Process the individual operation by calling ProcessMessage recursively
// but with the individual operation payload
opResponses, err := e.processIndividualMessage(
opResponses, applicableDeploy, err := e.processIndividualMessage(
frameNumber,
feeForOp,
feeMultiplier,
movingAddress,
op,
true,
responses.State,
state,
)
if err != nil {
return nil, errors.Wrapf(err, "handle bundle: operation %d failed", i)
}
if op.GetComputeDeploy() != nil {
if len(state.Changeset()) == changesetLen {
return nil, errors.Wrap(
errors.New("deploy did not produce changeset"),
"handle bundle",
)
}
changeset := state.Changeset()
movingAddress = changeset[len(changeset)-1].Domain
movingAddress = applicableDeploy
}
// Collect responses
responses.Messages = append(responses.Messages, opResponses.Messages...)
responses.State = opResponses.State
}
e.logger.Info(
@ -830,10 +819,10 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
message *protobufs.MessageRequest,
fromBundle bool,
state state.State,
) (*execution.ProcessMessageResult, error) {
) (*execution.ProcessMessageResult, []byte, error) {
payload, err := e.tryExtractMessageForIntrinsic(message)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
return nil, nil, errors.Wrap(err, "process individual message")
}
// Read the type prefix to determine if it's a deploy or operation
@ -845,7 +834,7 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
if fromBundle {
return e.handleDeploy(address, payload, frameNumber, feePaid, state)
} else {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("deploy or update messages must be bundled"),
"process individual message",
)
@ -859,7 +848,7 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
[64]byte(slices.Concat(address, bytes.Repeat([]byte{0xff}, 32))),
)
if err == nil || !fromBundle {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("non-deploy messages not allowed in global mode"),
"process individual message",
)
@ -869,12 +858,7 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
// Otherwise, try to handle it as an operation on existing intrinsic
intrinsic, err := e.tryGetIntrinsic(address)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
err = e.validateIndividualMessage(frameNumber, address, message, fromBundle)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
return nil, nil, errors.Wrap(err, "process individual message")
}
// Process the operation
@ -886,7 +870,7 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
state,
)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
return nil, nil, errors.Wrap(err, "process individual message")
}
// Log state changes for debugging
@ -898,7 +882,7 @@ func (e *ComputeExecutionEngine) processIndividualMessage(
return &execution.ProcessMessageResult{
Messages: []*protobufs.Message{},
State: state,
}, nil
}, nil, nil
}
func (e *ComputeExecutionEngine) tryGetIntrinsic(

View File

@ -357,7 +357,7 @@ func (e *GlobalExecutionEngine) handleBundle(
address,
op,
true,
responses.State,
state,
)
if err != nil {
// Skip non-global operations (e.g., token payments, compute ops)
@ -371,7 +371,6 @@ func (e *GlobalExecutionEngine) handleBundle(
// Collect responses
responses.Messages = append(responses.Messages, opResponses.Messages...)
responses.State = opResponses.State
}
e.logger.Info(
@ -404,11 +403,6 @@ func (e *GlobalExecutionEngine) processIndividualMessage(
return nil, errors.Wrap(err, "process individual message")
}
err = e.validateIndividualMessage(frameNumber, address, message, fromBundle)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
// Process the operation
_, err = intrinsic.InvokeStep(
frameNumber,
@ -421,11 +415,6 @@ func (e *GlobalExecutionEngine) processIndividualMessage(
return nil, errors.Wrap(err, "process individual message")
}
newState, err := intrinsic.Commit()
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
e.logger.Debug(
"processed individual message",
zap.String("address", hex.EncodeToString(address)),
@ -433,7 +422,7 @@ func (e *GlobalExecutionEngine) processIndividualMessage(
return &execution.ProcessMessageResult{
Messages: []*protobufs.Message{},
State: newState,
State: state,
}, nil
}

View File

@ -565,7 +565,7 @@ func (e *HypergraphExecutionEngine) handleBundle(
address,
op,
true,
responses.State,
state,
)
if err != nil {
return nil, errors.Wrapf(err, "handle bundle: operation %d failed", i)
@ -573,7 +573,6 @@ func (e *HypergraphExecutionEngine) handleBundle(
// Collect responses
responses.Messages = append(responses.Messages, opResponses.Messages...)
responses.State = opResponses.State
}
e.logger.Info(
@ -654,11 +653,6 @@ func (e *HypergraphExecutionEngine) processIndividualMessage(
return nil, errors.Wrap(err, "process individual message")
}
err = e.validateIndividualMessage(frameNumber, address, message, fromBundle)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
// Process the operation
_, err = intrinsic.InvokeStep(
frameNumber,
@ -694,6 +688,7 @@ func (e *HypergraphExecutionEngine) handleDeploy(
feePaid *big.Int,
state state.State,
) (*execution.ProcessMessageResult, error) {
var applicableDeploy []byte
var intrinsic *hypergraphintrinsic.HypergraphIntrinsic
if bytes.Equal(address, hypergraphintrinsic.HYPERGRAPH_BASE_DOMAIN[:]) {
// Deserialize the deploy arguments
@ -721,7 +716,7 @@ func (e *HypergraphExecutionEngine) handleDeploy(
)
// Deploy the intrinsic
state, err = intrinsic.Deploy(
state, applicableDeploy, err = intrinsic.Deploy(
hypergraphintrinsic.HYPERGRAPH_BASE_DOMAIN,
nil, // provers
nil, // creator
@ -734,17 +729,14 @@ func (e *HypergraphExecutionEngine) handleDeploy(
return nil, errors.Wrap(err, "handle deploy")
}
// Get the deployed address
deployedAddress := intrinsic.Address()
// Store the intrinsic
e.intrinsicsMutex.Lock()
e.intrinsics[string(deployedAddress)] = intrinsic
e.intrinsics[string(applicableDeploy)] = intrinsic
e.intrinsicsMutex.Unlock()
e.logger.Info(
"deployed hypergraph intrinsic",
zap.String("address", hex.EncodeToString(deployedAddress)),
zap.String("address", hex.EncodeToString(applicableDeploy)),
)
} else {
// Deserialize the update arguments
@ -770,7 +762,7 @@ func (e *HypergraphExecutionEngine) handleDeploy(
// Deploy (update) the intrinsic
var domain [32]byte
copy(domain[:], address)
state, err = intrinsic.Deploy(
state, applicableDeploy, err = intrinsic.Deploy(
domain,
nil, // provers
nil, // creator
@ -785,12 +777,12 @@ func (e *HypergraphExecutionEngine) handleDeploy(
// Store the intrinsic
e.intrinsicsMutex.Lock()
e.intrinsics[string(address)] = intrinsic
e.intrinsics[string(applicableDeploy)] = intrinsic
e.intrinsicsMutex.Unlock()
e.logger.Info(
"updated hypergraph intrinsic",
zap.String("address", hex.EncodeToString(address)),
zap.String("address", hex.EncodeToString(applicableDeploy)),
)
}

View File

@ -481,7 +481,7 @@ func (e *TokenExecutionEngine) ProcessMessage(
}
// Otherwise, delegate to individual message processing
result, err := e.processIndividualMessage(
result, _, err := e.processIndividualMessage(
frameNumber,
big.NewInt(0),
feeMultiplier,
@ -624,14 +624,14 @@ func (e *TokenExecutionEngine) handleBundle(
// Process the individual operation by calling ProcessMessage recursively
// but with the individual operation payload
opResponses, err := e.processIndividualMessage(
opResponses, _, err := e.processIndividualMessage(
frameNumber,
feeForOp,
feeMultiplier,
address,
op,
true,
responses.State,
state,
)
if err != nil {
return nil, errors.Wrapf(err, "handle bundle: operation %d failed", i)
@ -639,7 +639,6 @@ func (e *TokenExecutionEngine) handleBundle(
// Collect responses
responses.Messages = append(responses.Messages, opResponses.Messages...)
responses.State = opResponses.State
}
e.logger.Info(
@ -661,7 +660,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
message *protobufs.MessageRequest,
fromBundle bool,
state state.State,
) (*execution.ProcessMessageResult, error) {
) (*execution.ProcessMessageResult, []byte, error) {
payload := []byte{}
var err error
domain := address
@ -683,7 +682,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
err = errors.New("unsupported message type")
}
if err != nil {
return nil, errors.Wrap(err, "process individual message")
return nil, nil, errors.Wrap(err, "process individual message")
}
// Read the type prefix to determine if it's a deploy or operation
@ -695,7 +694,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
if fromBundle {
return e.handleDeploy(domain, payload, frameNumber, feePaid, state)
} else {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("deploy or update messages must be bundled"),
"process individual message",
)
@ -709,7 +708,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
[64]byte(slices.Concat(domain, bytes.Repeat([]byte{0xff}, 32))),
)
if err == nil || !fromBundle {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("non-deploy messages not allowed in global mode"),
"process individual message",
)
@ -719,12 +718,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
// Otherwise, try to handle it as an operation on existing intrinsic
intrinsic, err := e.tryGetIntrinsic(domain)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
}
err = e.validateIndividualMessage(frameNumber, domain, message, fromBundle)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
return nil, nil, errors.Wrap(err, "process individual message")
}
// Process the operation
@ -736,7 +730,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
state,
)
if err != nil {
return nil, errors.Wrap(err, "process individual message")
return nil, nil, errors.Wrap(err, "process individual message")
}
e.logger.Debug(
@ -747,7 +741,7 @@ func (e *TokenExecutionEngine) processIndividualMessage(
return &execution.ProcessMessageResult{
Messages: []*protobufs.Message{},
State: newState,
}, nil
}, nil, nil
}
func (e *TokenExecutionEngine) handleDeploy(
@ -756,9 +750,10 @@ func (e *TokenExecutionEngine) handleDeploy(
frameNumber uint64,
feePaid *big.Int,
state state.State,
) (*execution.ProcessMessageResult, error) {
) (*execution.ProcessMessageResult, []byte, error) {
var applicableDeploy []byte
if bytes.Equal(address, token.QUIL_TOKEN_ADDRESS) {
return nil, errors.Wrap(errors.New("reserved"), "handle deploy")
return nil, nil, errors.Wrap(errors.New("reserved"), "handle deploy")
}
var intrinsic *token.TokenIntrinsic
@ -767,12 +762,12 @@ func (e *TokenExecutionEngine) handleDeploy(
deployPb := &protobufs.TokenDeploy{}
err := deployPb.FromCanonicalBytes(payload)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
deployArgs, err := token.TokenDeployFromProtobuf(deployPb)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Create new token intrinsic
@ -786,11 +781,11 @@ func (e *TokenExecutionEngine) handleDeploy(
e.keyManager,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Deploy the intrinsic
state, err = intrinsic.Deploy(
state, applicableDeploy, err = intrinsic.Deploy(
token.TOKEN_BASE_DOMAIN,
nil, // provers
nil, // creator
@ -800,20 +795,17 @@ func (e *TokenExecutionEngine) handleDeploy(
state,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Get the deployed address
deployedAddress := intrinsic.Address()
// Store the intrinsic
e.intrinsicsMutex.Lock()
e.intrinsics[string(deployedAddress)] = intrinsic
e.intrinsics[string(applicableDeploy)] = intrinsic
e.intrinsicsMutex.Unlock()
e.logger.Info(
"deployed token intrinsic",
zap.String("address", hex.EncodeToString(deployedAddress)),
zap.String("address", hex.EncodeToString(applicableDeploy)),
zap.String("name", deployArgs.Config.Name),
zap.String("symbol", deployArgs.Config.Symbol),
)
@ -822,12 +814,12 @@ func (e *TokenExecutionEngine) handleDeploy(
updatePb := &protobufs.TokenUpdate{}
err := updatePb.FromCanonicalBytes(payload)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
deployArgs, err := token.TokenUpdateFromProtobuf(updatePb)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Load existing token intrinsic
@ -842,11 +834,11 @@ func (e *TokenExecutionEngine) handleDeploy(
e.clockStore,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Update the intrinsic
state, err = intrinsic.Deploy(
state, applicableDeploy, err = intrinsic.Deploy(
[32]byte(address),
nil, // provers
nil, // creator
@ -856,20 +848,17 @@ func (e *TokenExecutionEngine) handleDeploy(
state,
)
if err != nil {
return nil, errors.Wrap(err, "handle deploy")
return nil, nil, errors.Wrap(err, "handle deploy")
}
// Get the deployed address
deployedAddress := intrinsic.Address()
// Store the intrinsic
e.intrinsicsMutex.Lock()
e.intrinsics[string(deployedAddress)] = intrinsic
e.intrinsics[string(applicableDeploy)] = intrinsic
e.intrinsicsMutex.Unlock()
e.logger.Info(
"updated token intrinsic",
zap.String("address", hex.EncodeToString(deployedAddress)),
zap.String("address", hex.EncodeToString(applicableDeploy)),
zap.String("name", deployArgs.Config.Name),
zap.String("symbol", deployArgs.Config.Symbol),
)
@ -878,7 +867,7 @@ func (e *TokenExecutionEngine) handleDeploy(
return &execution.ProcessMessageResult{
Messages: []*protobufs.Message{},
State: state,
}, nil
}, applicableDeploy, nil
}
func (e *TokenExecutionEngine) tryGetIntrinsic(

View File

@ -356,7 +356,7 @@ func (c *ComputeIntrinsic) Deploy(
contextData []byte,
frameNumber uint64,
hgstate state.State,
) (state.State, error) {
) (state.State, []byte, error) {
if !bytes.Equal(domain[:], COMPUTE_INTRINSIC_DOMAIN[:]) {
vert, err := hgstate.Get(
domain[:],
@ -364,14 +364,14 @@ func (c *ComputeIntrinsic) Deploy(
hg.VertexAddsDiscriminator,
)
if err != nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
state.ErrInvalidDomain,
"deploy",
)
}
if vert == nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
state.ErrInvalidDomain,
"deploy",
)
@ -381,16 +381,16 @@ func (c *ComputeIntrinsic) Deploy(
updatePb := &protobufs.ComputeUpdate{}
err = updatePb.FromCanonicalBytes(contextData)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
deployArgs, err := ComputeUpdateFromProtobuf(updatePb)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
if err := updatePb.Validate(); err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
updateWithoutSignature := proto.Clone(updatePb).(*protobufs.ComputeUpdate)
@ -398,7 +398,7 @@ func (c *ComputeIntrinsic) Deploy(
updateWithoutSignature.PublicKeySignatureBls48581 = nil
message, err := updateWithoutSignature.ToCanonicalBytes()
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
validSig, err := c.keyManager.ValidateSignature(
@ -409,7 +409,7 @@ func (c *ComputeIntrinsic) Deploy(
slices.Concat(domain[:], []byte("COMPUTE_UPDATE")),
)
if err != nil || !validSig {
return nil, errors.Wrap(errors.New("invalid signature"), "deploy")
return nil, nil, errors.Wrap(errors.New("invalid signature"), "deploy")
}
vertexAddress := slices.Concat(
@ -420,17 +420,17 @@ func (c *ComputeIntrinsic) Deploy(
// Ensure the vertex is present and has not been removed
_, err = c.hypergraph.GetVertex([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
prior, err := c.hypergraph.GetVertexData([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
tree, err := c.hypergraph.GetVertexData([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
// Retrieve the existing RDF schema from the tree
@ -446,19 +446,19 @@ func (c *ComputeIntrinsic) Deploy(
deployArgs.Config,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
commit := configTree.Commit(c.inclusionProver, false)
out, err := tries.SerializeNonLazyTree(configTree)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
err = tree.Insert([]byte{16 << 2}, out, commit, configTree.GetSize())
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
}
@ -469,7 +469,7 @@ func (c *ComputeIntrinsic) Deploy(
// Validate that the new schema is valid
_, err := c.rdfMultiprover.GetSchemaMap(newSchemaDoc)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
// Validate that the update only adds new classes/properties, never
@ -477,7 +477,7 @@ func (c *ComputeIntrinsic) Deploy(
if existingRDFSchema != "" {
err = c.validateRDFSchemaUpdate(existingRDFSchema, newSchemaDoc)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
}
@ -489,7 +489,7 @@ func (c *ComputeIntrinsic) Deploy(
big.NewInt(int64(len(deployArgs.RDFSchema))),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
c.rdfHypergraphSchema = newSchemaDoc
@ -512,12 +512,12 @@ func (c *ComputeIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
c.state = hgstate
return hgstate, nil
return hgstate, slices.Clone(c.Address()), nil
}
// Initialize consensus metadata
@ -531,7 +531,7 @@ func (c *ComputeIntrinsic) Deploy(
var err error
additionalData[13], err = newComputeConfigurationMetadata(c.config)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
// Generate compute domain - include config commitment in domain generation
@ -542,14 +542,14 @@ func (c *ComputeIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
computeDomain := computeDomainBI.FillBytes(make([]byte, 32))
rdfHypergraphSchema, err := c.newComputeRDFHypergraphSchema(contextData)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
// Initialize the state
@ -561,7 +561,7 @@ func (c *ComputeIntrinsic) Deploy(
additionalData,
COMPUTE_INTRINSIC_DOMAIN[:],
); err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
c.state = hgstate
@ -571,7 +571,7 @@ func (c *ComputeIntrinsic) Deploy(
c.sumcheckInfo = sumcheckInfo
c.rdfHypergraphSchema = rdfHypergraphSchema
return c.state, nil
return c.state, slices.Clone(c.Address()), nil
}
// Validate implements intrinsics.Intrinsic.

View File

@ -248,7 +248,7 @@ func TestComputeIntrinsic_Integration(t *testing.T) {
// Test Deploy
t.Run("Deploy", func(t *testing.T) {
var state state.State = hgstate.NewHypergraphState(hg)
state, err = computeIntrinsic.Deploy(
state, _, err = computeIntrinsic.Deploy(
compute.COMPUTE_INTRINSIC_DOMAIN,
[][]byte{creator}, // provers
creator,
@ -396,7 +396,7 @@ func main(a, b int) int {
t.Run("LoadComputeIntrinsic", func(t *testing.T) {
var state state.State = hgstate.NewHypergraphState(hg)
// First deploy a compute intrinsic
state, err := computeIntrinsic.Deploy(
state, _, err := computeIntrinsic.Deploy(
compute.COMPUTE_INTRINSIC_DOMAIN,
[][]byte{creator},
creator,

View File

@ -99,7 +99,7 @@ compute:Output a rdfs:Property;
`
// Deploy the compute intrinsic
deployState, err = computeIntrinsic.Deploy(
deployState, _, err = computeIntrinsic.Deploy(
compute.COMPUTE_INTRINSIC_DOMAIN,
nil,
[]byte("creator"),
@ -163,7 +163,7 @@ compute:Output a rdfs:Property;
var domain [32]byte
copy(domain[:], computeAddress)
updateState, err := computeIntrinsic.Deploy(
updateState, _, err := computeIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -236,7 +236,7 @@ compute:Value a rdfs:Property;
var domain [32]byte
copy(domain[:], computeAddress)
updateState, err := computeIntrinsic.Deploy(
updateState, _, err := computeIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -293,7 +293,7 @@ compute:Input a rdfs:Property;
var domain [32]byte
copy(domain[:], computeAddress)
updateState, err := computeIntrinsic.Deploy(
updateState, _, err := computeIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -357,7 +357,7 @@ compute:Output a rdfs:Property;
var domain [32]byte
copy(domain[:], computeAddress)
updateState, err := computeIntrinsic.Deploy(
updateState, _, err := computeIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -396,7 +396,7 @@ compute:Output a rdfs:Property;
var domain [32]byte
copy(domain[:], computeAddress)
updateState, err := computeIntrinsic.Deploy(
updateState, _, err := computeIntrinsic.Deploy(
domain,
nil,
[]byte("unauthorized"),
@ -453,7 +453,7 @@ compute:Output a rdfs:Property;
var domain [32]byte
copy(domain[:], computeAddress)
updateState, err := computeIntrinsic.Deploy(
updateState, _, err := computeIntrinsic.Deploy(
domain,
nil,
[]byte("wrong-owner"),

View File

@ -208,8 +208,8 @@ func (a *GlobalIntrinsic) Deploy(
contextData []byte,
frameNumber uint64,
state state.State,
) (state.State, error) {
return nil, errors.Wrap(
) (state.State, []byte, error) {
return nil, nil, errors.Wrap(
errors.New("global intrinsic cannot be deployed"),
"deploy",
)
@ -756,23 +756,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.hypergraph = a.hypergraph
op.keyManager = a.keyManager
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_join",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_join",
).Inc()
return nil, errors.Wrap(errors.New("invalid prover join"), "invoke step")
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -826,23 +809,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.hypergraph = a.hypergraph
op.keyManager = a.keyManager
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_leave",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_leave",
).Inc()
return nil, errors.Wrap(errors.New("invalid prover leave"), "invoke step")
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -899,23 +865,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.hypergraph = a.hypergraph
op.keyManager = a.keyManager
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_pause",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_pause",
).Inc()
return nil, errors.Wrap(errors.New("invalid prover pause"), "invoke step")
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -975,26 +924,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.hypergraph = a.hypergraph
op.keyManager = a.keyManager
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_resume",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_resume",
).Inc()
return nil, errors.Wrap(
errors.New("invalid prover resume"),
"invoke step",
)
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -1054,26 +983,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.hypergraph = a.hypergraph
op.keyManager = a.keyManager
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_confirm",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_confirm",
).Inc()
return nil, errors.Wrap(
errors.New("invalid prover confirm"),
"invoke step",
)
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -1133,26 +1042,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.hypergraph = a.hypergraph
op.keyManager = a.keyManager
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_reject",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_reject",
).Inc()
return nil, errors.Wrap(
errors.New("invalid prover reject"),
"invoke step",
)
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -1202,26 +1091,6 @@ func (a *GlobalIntrinsic) InvokeStep(
op.rdfMultiprover = a.rdfMultiprover
op.hypergraph = a.hypergraph
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_kick",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_kick",
).Inc()
return nil, errors.Wrap(
errors.New("invalid prover kick"),
"invoke step",
)
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)
@ -1268,26 +1137,6 @@ func (a *GlobalIntrinsic) InvokeStep(
a.blsConstructor,
)
valid, err := op.Verify(frameNumber)
if err != nil {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return nil, errors.Wrap(err, "invoke step")
}
if !valid {
observability.InvokeStepErrors.WithLabelValues(
"global",
"prover_shard_update",
).Inc()
return nil, errors.Wrap(
errors.New("invalid prover shard update"),
"invoke step",
)
}
matTimer := prometheus.NewTimer(
observability.MaterializeDuration.WithLabelValues("global"),
)

View File

@ -5,9 +5,12 @@ package global_test
import (
"encoding/binary"
"fmt"
"math/big"
"slices"
"sync"
"testing"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/stretchr/testify/assert"
@ -18,7 +21,10 @@ import (
"source.quilibrium.com/quilibrium/monorepo/bulletproofs"
"source.quilibrium.com/quilibrium/monorepo/config"
hgcrdt "source.quilibrium.com/quilibrium/monorepo/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/provers"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/reward"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/global"
hgstate "source.quilibrium.com/quilibrium/monorepo/node/execution/state/hypergraph"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tests"
@ -49,6 +55,152 @@ func createHypergraph(t *testing.T) (hypergraph.Hypergraph, *bls48581.KZGInclusi
return hg, ip, rm
}
// TestGlobalIntrinsicProverJoinFlow exercises the prover-join path of the
// global intrinsic under concurrency: 100 provers build and prove join
// operations in parallel, invoke them against a shared hypergraph state,
// commit, and then verify each prover's vertex data (Status and PublicKey).
func TestGlobalIntrinsicProverJoinFlow(t *testing.T) {
	logger := zap.NewNop()
	blsConstructor := &bls48581.Bls48581KeyConstructor{}
	keyManager := keys.NewInMemoryKeyManager(blsConstructor, &bulletproofs.Decaf448KeyConstructor{})
	signer, _, err := keyManager.CreateSigningKey("q-prover-key", crypto.KeyTypeBLS48581G1)
	require.NoError(t, err)
	require.NotNil(t, signer)

	frameNumber := uint64(100)
	hg, inclusionProver, rdfMultiprover := createHypergraph(t)
	frameProver := vdf.NewWesolowskiFrameProver(logger)
	pebbleDB := store.NewPebbleDB(logger, &config.DBConfig{InMemoryDONOTUSE: true, Path: ".test/global_intrinsic"}, 0)
	frameStore := store.NewPebbleClockStore(pebbleDB, logger)

	// Seed the frame store with the global frame the joins will prove against.
	txn, err := frameStore.NewTransaction(false)
	require.NoError(t, err)
	err = frameStore.PutGlobalClockFrame(&protobufs.GlobalFrame{
		Header: &protobufs.GlobalFrameHeader{
			FrameNumber: frameNumber,
			Output:      make([]byte, 516),
			Difficulty:  50000,
		},
	}, txn)
	require.NoError(t, err)
	require.NoError(t, txn.Commit())

	rewardIssuance := reward.NewOptRewardIssuance()
	proverRegistry, err := provers.NewProverRegistry(logger, hg)
	require.NoError(t, err)

	intrinsic, err := global.LoadGlobalIntrinsic(
		logger,
		intrinsics.GLOBAL_INTRINSIC_ADDRESS[:],
		hg,
		inclusionProver,
		keyManager,
		frameProver,
		frameStore,
		rewardIssuance,
		proverRegistry,
		blsConstructor,
	)
	require.NoError(t, err)

	addresses := make([][]byte, 100)
	payloads := make([][]byte, 100)
	wg := sync.WaitGroup{}
	initialState := hgstate.NewHypergraphState(hg)
	now := time.Now()
	for i := 0; i < 100; i++ {
		wg.Add(1)
		// i is passed explicitly so the closure is correct regardless of
		// the toolchain's loop-variable capture semantics (pre/post Go 1.22).
		go func(i int) {
			defer wg.Done()
			// NOTE: require (t.FailNow) must only be called from the test
			// goroutine; inside workers we use assert (t.Errorf) and bail out.
			filter := slices.Concat([]byte("integration-test-filter000000000000000"), []byte{byte(i)})
			keyManager := keys.NewInMemoryKeyManager(blsConstructor, &bulletproofs.Decaf448KeyConstructor{})
			signer, _, err := keyManager.CreateSigningKey("q-prover-key", crypto.KeyTypeBLS48581G1)
			if !assert.NoError(t, err) {
				return
			}
			addressBI, err := poseidon.HashBytes(signer.Public().([]byte))
			if !assert.NoError(t, err) {
				return
			}
			proverAddress := addressBI.FillBytes(make([]byte, 32))
			addresses[i] = proverAddress
			proverJoin, err := global.NewProverJoin(
				[][]byte{filter},
				frameNumber,
				nil,
				nil,
				keyManager,
				hg,
				rdfMultiprover,
				frameProver,
				frameStore,
			)
			if !assert.NoError(t, err) {
				return
			}
			challenge := sha3.Sum256(make([]byte, 516))
			proof := frameProver.CalculateMultiProof(
				challenge,
				50000,
				[][]byte{slices.Concat(proverAddress, filter, binary.BigEndian.AppendUint32(nil, 0))},
				0,
			)
			proverJoin.Proof = proof[:]
			err = proverJoin.Prove(frameNumber)
			if !assert.NoError(t, err) {
				return
			}
			payload, err := proverJoin.ToBytes()
			if !assert.NoError(t, err) {
				return
			}
			payloads[i] = payload
		}(i)
	}
	wg.Wait()
	fmt.Println("prove", time.Since(now))

	now = time.Now()
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			resultState, err := intrinsic.InvokeStep(
				frameNumber,
				payloads[i],
				big.NewInt(0),
				big.NewInt(1),
				initialState,
			)
			if !assert.NoError(t, err) {
				return
			}
			assert.NotNil(t, resultState)
		}(i)
	}
	wg.Wait()
	fmt.Println(len(initialState.Changeset()))
	fmt.Println("invoke", time.Since(now))

	now = time.Now()
	_, err = intrinsic.Commit()
	fmt.Println("commit", time.Since(now))
	require.NoError(t, err)

	// Each joined prover must now have a vertex under the global intrinsic
	// address with Status 0 and a retrievable PublicKey.
	for i := 0; i < 100; i++ {
		fullAddress := [64]byte{}
		copy(fullAddress[:32], intrinsics.GLOBAL_INTRINSIC_ADDRESS[:])
		copy(fullAddress[32:], addresses[i])
		proverTree, err := hg.GetVertexData(fullAddress)
		require.NoError(t, err)
		require.NotNil(t, proverTree)
		statusBytes, err := rdfMultiprover.Get(
			global.GLOBAL_RDF_SCHEMA,
			"prover:Prover",
			"Status",
			proverTree,
		)
		require.NoError(t, err)
		require.Equal(t, []byte{0}, statusBytes)
		_, err = rdfMultiprover.Get(
			global.GLOBAL_RDF_SCHEMA,
			"prover:Prover",
			"PublicKey",
			proverTree,
		)
		require.NoError(t, err)
	}
}
// Helper function to create an active prover with allocations in the hypergraph
func createActiveProverWithAllocation(t *testing.T, hg hypergraph.Hypergraph, ip *bls48581.KZGInclusionProver, rm *schema.RDFMultiprover, pubKey []byte, filter []byte, joinFrame uint64) {
addrBI, err := poseidon.HashBytes(pubKey)
@ -373,8 +525,8 @@ func TestGlobalProverOperations_Integration(t *testing.T) {
hg.SetVertexData(txn, [64]byte(slices.Concat(intrinsics.GLOBAL_INTRINSIC_ADDRESS[:], allocationAddress)), allocationTree)
txn.Commit()
// Try to confirm at frame 255840 + 360
confirmFrame := uint64(255840 + 360)
// Try to confirm at frame 255840 + 1080
confirmFrame := uint64(token.FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END)
proverConfirm, err := global.NewProverConfirm(filter, confirmFrame, keyManager, hg, rm)
require.NoError(t, err)

View File

@ -387,6 +387,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Setup
mockKeyManager := new(mocks.MockKeyManager)
mockHypergraph := new(mocks.MockHypergraph)
mockInclusionProver := new(mocks.MockInclusionProver)
mockHypergraph.On("GetCoveredPrefix").Return([]int{}, nil)
hypergraphState := hgstate.NewHypergraphState(mockHypergraph)
@ -438,6 +439,7 @@ func TestProverConfirm_Materialize(t *testing.T) {
// Mock allocation vertex data
mockHypergraph.On("GetVertex", allocFullAddr).Return(nil, nil).Maybe()
mockHypergraph.On("GetProver").Return(mockInclusionProver).Maybe()
mockHypergraph.On("GetVertexData", allocFullAddr).Return(allocTrie, nil).Maybe()
mockHypergraph.On("GetHyperedge", mock.Anything).Return(&mockHyperedge{}, nil)

View File

@ -197,7 +197,7 @@ func (p *ProverKick) Materialize(
)
}
vertices := tries.GetAllPreloadedLeaves(he.GetExtrinsicTree().Root)
if err == nil && len(vertices) > 0 {
if len(vertices) > 0 {
for _, vertex := range vertices {
allocationFullAddress := vertex.Key
@ -372,7 +372,7 @@ func (p *ProverKick) GetWriteAddresses(frameNumber uint64) ([][]byte, error) {
addresses[string(hyperedgeAddress[:])] = struct{}{}
vertices := tries.GetAllPreloadedLeaves(hyperedge.GetExtrinsicTree().Root)
if err == nil && len(vertices) > 0 {
if len(vertices) > 0 {
for _, vertex := range vertices {
addresses[string(vertex.Key)] = struct{}{}
}
@ -523,10 +523,6 @@ func (p *ProverKick) verifyEquivocation(kickedPublicKey []byte) bool {
return false
}
if frame1 == nil || frame2 == nil {
return false
}
frameNumber1 = frame1.FrameNumber
frameNumber2 = frame2.FrameNumber
output1 = frame1.Output

View File

@ -381,7 +381,7 @@ func FuzzProverKick_Deserialization(f *testing.F) {
// We expect errors for malformed data, but shouldn't panic
if err == nil {
// Verify successful deserialization
if pk.KickedProverPublicKey == nil || len(pk.KickedProverPublicKey) == 0 {
if len(pk.KickedProverPublicKey) == 0 {
t.Errorf("KickedProverPublicKey should not be nil or empty after successful deserialization")
}
}

View File

@ -58,7 +58,7 @@ func UpdateAggregateProverStatus(
}
vertices := tries.GetAllPreloadedLeaves(he.GetExtrinsicTree().Root)
if err == nil && len(vertices) > 0 {
if len(vertices) > 0 {
for _, vertex := range vertices {
allocationFullAddress := vertex.Key

View File

@ -62,7 +62,7 @@ func TestHypergraphIntrinsicIntegration(t *testing.T) {
var deployState state.State = hgstate.NewHypergraphState(hg)
// Deploy the hypergraph intrinsic
deployState, err = hypergraphIntrinsic.Deploy(
deployState, _, err = hypergraphIntrinsic.Deploy(
hypergraph.HYPERGRAPH_BASE_DOMAIN,
[][]byte{}, // No provers for hypergraph intrinsic
[]byte("creator"),

View File

@ -491,7 +491,7 @@ func (h *HypergraphIntrinsic) Deploy(
contextData []byte,
frameNumber uint64,
hgstate state.State,
) (state.State, error) {
) (state.State, []byte, error) {
if !bytes.Equal(domain[:], HYPERGRAPH_BASE_DOMAIN[:]) {
vert, err := hgstate.Get(
domain[:],
@ -499,14 +499,14 @@ func (h *HypergraphIntrinsic) Deploy(
hg.VertexAddsDiscriminator,
)
if err != nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
state.ErrInvalidDomain,
"deploy",
)
}
if vert == nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
state.ErrInvalidDomain,
"deploy",
)
@ -516,16 +516,16 @@ func (h *HypergraphIntrinsic) Deploy(
updatePb := &protobufs.HypergraphUpdate{}
err = updatePb.FromCanonicalBytes(contextData)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
if err := updatePb.Validate(); err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
deployArgs, err := HypergraphUpdateFromProtobuf(updatePb)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
updateWithoutSignature := proto.Clone(updatePb).(*protobufs.HypergraphUpdate)
@ -533,7 +533,7 @@ func (h *HypergraphIntrinsic) Deploy(
updateWithoutSignature.PublicKeySignatureBls48581 = nil
message, err := updateWithoutSignature.ToCanonicalBytes()
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
validSig, err := h.keyManager.ValidateSignature(
@ -544,7 +544,7 @@ func (h *HypergraphIntrinsic) Deploy(
slices.Concat(domain[:], []byte("HYPERGRAPH_UPDATE")),
)
if err != nil || !validSig {
return nil, errors.Wrap(errors.New("invalid signature"), "deploy")
return nil, nil, errors.Wrap(errors.New("invalid signature"), "deploy")
}
vertexAddress := slices.Concat(
@ -555,17 +555,17 @@ func (h *HypergraphIntrinsic) Deploy(
// Ensure the vertex is present and has not been removed
_, err = h.hypergraph.GetVertex([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
prior, err := h.hypergraph.GetVertexData([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
tree, err := h.hypergraph.GetVertexData([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
// Retrieve the existing RDF schema from the tree
@ -581,19 +581,19 @@ func (h *HypergraphIntrinsic) Deploy(
deployArgs.Config,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
commit := configTree.Commit(h.inclusionProver, false)
out, err := qcrypto.SerializeNonLazyTree(configTree)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
err = tree.Insert([]byte{16 << 2}, out, commit, configTree.GetSize())
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
}
@ -604,7 +604,7 @@ func (h *HypergraphIntrinsic) Deploy(
// Validate that the new schema is valid
_, err := h.rdfMultiprover.GetSchemaMap(newSchemaDoc)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
// Validate that the update only adds new classes/properties, never
@ -612,7 +612,7 @@ func (h *HypergraphIntrinsic) Deploy(
if existingRDFSchema != "" {
err = h.validateRDFSchemaUpdate(existingRDFSchema, newSchemaDoc)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
}
@ -624,7 +624,7 @@ func (h *HypergraphIntrinsic) Deploy(
big.NewInt(int64(len(deployArgs.RDFSchema))),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
h.rdfHypergraphSchema = newSchemaDoc
@ -647,24 +647,24 @@ func (h *HypergraphIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
h.state = hgstate
return hgstate, nil
return hgstate, slices.Clone(h.Address()), nil
}
initialConsensusMetadata, err := newHypergraphConsensusMetadata(
provers,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
initialSumcheckInfo, err := newHypergraphSumcheckInfo()
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
additionalData := make([]*qcrypto.VectorCommitmentTree, 14)
@ -672,7 +672,7 @@ func (h *HypergraphIntrinsic) Deploy(
h.config,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
hypergraphDomainBI, err := poseidon.HashBytes(
@ -682,14 +682,14 @@ func (h *HypergraphIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
hypergraphDomain := hypergraphDomainBI.FillBytes(make([]byte, 32))
rdfHypergraphSchema, err := h.newHypergraphRDFHypergraphSchema(contextData)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
h.domain = hypergraphDomain
@ -702,12 +702,12 @@ func (h *HypergraphIntrinsic) Deploy(
additionalData,
HYPERGRAPH_BASE_DOMAIN[:],
); err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
h.state = hgstate
return h.state, nil
return h.state, slices.Clone(hypergraphDomain), nil
}
// Validate implements intrinsics.Intrinsic.

View File

@ -88,7 +88,7 @@ test:Value a rdfs:Property;
`
// Deploy the hypergraph
deployState, err = hypergraphIntrinsic.Deploy(
deployState, _, err = hypergraphIntrinsic.Deploy(
hgi.HYPERGRAPH_BASE_DOMAIN,
nil,
[]byte("creator"),
@ -170,7 +170,7 @@ test:Description a rdfs:Property;
var domain [32]byte
copy(domain[:], hypergraphAddress)
updateState, err := hypergraphIntrinsic.Deploy(
updateState, _, err := hypergraphIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -231,7 +231,7 @@ test:Name a rdfs:Property;
var domain [32]byte
copy(domain[:], hypergraphAddress)
updateState, err := hypergraphIntrinsic.Deploy(
updateState, _, err := hypergraphIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -299,7 +299,7 @@ test:Value a rdfs:Property;
var domain [32]byte
copy(domain[:], hypergraphAddress)
updateState, err := hypergraphIntrinsic.Deploy(
updateState, _, err := hypergraphIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -332,7 +332,7 @@ test:Value a rdfs:Property;
var domain [32]byte
copy(domain[:], hypergraphAddress)
updateState, err := hypergraphIntrinsic.Deploy(
updateState, _, err := hypergraphIntrinsic.Deploy(
domain,
nil,
[]byte("unauthorized"),
@ -383,7 +383,7 @@ test:Value a rdfs:Property;
var domain [32]byte
copy(domain[:], hypergraphAddress)
updateState, err := hypergraphIntrinsic.Deploy(
updateState, _, err := hypergraphIntrinsic.Deploy(
domain,
nil,
[]byte("wrong-owner"),

View File

@ -90,6 +90,7 @@ func (t *TokenIntrinsic) Deploy(
hgstate state.State,
) (
state.State,
[]byte,
error,
) {
timer := prometheus.NewTimer(
@ -104,14 +105,14 @@ func (t *TokenIntrinsic) Deploy(
hg.VertexAddsDiscriminator,
)
if err != nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
state.ErrInvalidDomain,
"deploy",
)
}
if vert == nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
state.ErrInvalidDomain,
"deploy",
)
@ -121,16 +122,16 @@ func (t *TokenIntrinsic) Deploy(
updatePb := &protobufs.TokenUpdate{}
err = updatePb.FromCanonicalBytes(contextData)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
deployArgs, err := TokenUpdateFromProtobuf(updatePb)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
if err := updatePb.Validate(); err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
updateWithoutSignature := proto.Clone(updatePb).(*protobufs.TokenUpdate)
@ -138,7 +139,7 @@ func (t *TokenIntrinsic) Deploy(
updateWithoutSignature.PublicKeySignatureBls48581 = nil
message, err := updateWithoutSignature.ToCanonicalBytes()
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
validSig, err := t.keyManager.ValidateSignature(
@ -149,11 +150,11 @@ func (t *TokenIntrinsic) Deploy(
slices.Concat(domain[:], []byte("TOKEN_UPDATE")),
)
if err != nil || !validSig {
return nil, errors.Wrap(errors.New("invalid signature"), "deploy")
return nil, nil, errors.Wrap(errors.New("invalid signature"), "deploy")
}
if t.config.Behavior != deployArgs.Config.Behavior {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("behavior cannot be updated"),
"deploy",
)
@ -161,7 +162,7 @@ func (t *TokenIntrinsic) Deploy(
if t.config.MintStrategy != nil {
if deployArgs.Config.MintStrategy == nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("mint strategy missing"),
"deploy",
)
@ -169,11 +170,11 @@ func (t *TokenIntrinsic) Deploy(
err := validateTokenConfiguration(deployArgs.Config)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
if deployArgs.Config.Supply.Cmp(t.config.Supply) < 0 {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("supply cannot be reduced"),
"deploy",
)
@ -181,7 +182,7 @@ func (t *TokenIntrinsic) Deploy(
if deployArgs.Config.Units != nil &&
deployArgs.Config.Units.Cmp(t.config.Units) != 0 {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("supply cannot be reduced"),
"deploy",
)
@ -196,17 +197,17 @@ func (t *TokenIntrinsic) Deploy(
// Ensure the vertex is present and has not been removed
_, err = t.hypergraph.GetVertex([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
prior, err := t.hypergraph.GetVertexData([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
tree, err := t.hypergraph.GetVertexData([64]byte(vertexAddress))
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
configTree, err := NewTokenConfigurationMetadata(
@ -214,19 +215,19 @@ func (t *TokenIntrinsic) Deploy(
t.rdfMultiprover,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
commit := configTree.Commit(t.inclusionProver, false)
out, err := tries.SerializeNonLazyTree(configTree)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
err = tree.Insert([]byte{16 << 2}, out, commit, configTree.GetSize())
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
err = hgstate.Set(
@ -243,24 +244,24 @@ func (t *TokenIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
t.state = hgstate
return hgstate, nil
return hgstate, slices.Clone(t.Address()), nil
}
initialConsensusMetadata, err := newTokenConsensusMetadata(
provers,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
initialSumcheckInfo, err := newTokenSumcheckInfo()
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
additionalData := make([]*qcrypto.VectorCommitmentTree, 14)
@ -269,7 +270,7 @@ func (t *TokenIntrinsic) Deploy(
t.rdfMultiprover,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
tokenDomainBI, err := poseidon.HashBytes(
@ -279,7 +280,7 @@ func (t *TokenIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
tokenDomain := tokenDomainBI.FillBytes(make([]byte, 32))
@ -291,7 +292,7 @@ func (t *TokenIntrinsic) Deploy(
t.config,
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
if err := hgstate.Init(
@ -302,12 +303,12 @@ func (t *TokenIntrinsic) Deploy(
additionalData,
TOKEN_BASE_DOMAIN[:],
); err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
if (t.config.Behavior & Divisible) == 0 {
if len(contextData)%120 != 0 {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.New("non-divisible token must have correct context data"),
"deploy",
)
@ -322,7 +323,7 @@ func (t *TokenIntrinsic) Deploy(
big.NewInt(64),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
err = additionalReferenceTree.Insert(
@ -332,7 +333,7 @@ func (t *TokenIntrinsic) Deploy(
big.NewInt(56),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
}
@ -350,13 +351,13 @@ func (t *TokenIntrinsic) Deploy(
),
)
if err != nil {
return nil, errors.Wrap(err, "deploy")
return nil, nil, errors.Wrap(err, "deploy")
}
}
t.state = hgstate
t.rdfHypergraphSchema = rdfHypergraphSchema
return t.state, nil
return t.state, slices.Clone(tokenDomain), nil
}
// Validate implements intrinsics.Intrinsic.

View File

@ -2233,7 +2233,7 @@ func TestValidAltTransaction(t *testing.T) {
assert.NoError(t, err)
var st state.State = hgstate.NewHypergraphState(hg)
st, err = intrinsic.Deploy(token.TOKEN_BASE_DOMAIN, [][]byte{}, []byte{}, big.NewInt(0), nil, 0, st)
st, _, err = intrinsic.Deploy(token.TOKEN_BASE_DOMAIN, [][]byte{}, []byte{}, big.NewInt(0), nil, 0, st)
assert.NoError(t, err)
domain := st.Changeset()[0].Domain
err = st.Commit()
@ -2808,7 +2808,7 @@ func TestFullTokenFlow_MintPendingTransactionNonDivisible(t *testing.T) {
)
assert.NoError(t, err)
nhgs, err := intrinsic.Deploy(token.TOKEN_BASE_DOMAIN, [][]byte{}, []byte{}, big.NewInt(0), make([]byte, 120), 0, hgs)
nhgs, _, err := intrinsic.Deploy(token.TOKEN_BASE_DOMAIN, [][]byte{}, []byte{}, big.NewInt(0), make([]byte, 120), 0, hgs)
assert.NoError(t, err)
err = nhgs.Commit()
assert.NoError(t, err)

View File

@ -186,7 +186,7 @@ func TestDeploy(t *testing.T) {
fee := big.NewInt(10)
var st state.State = hgstate.NewHypergraphState(hypergraph)
st, err = intrinsic.Deploy(domain, provers, creator, fee, []byte{}, 1, st)
st, _, err = intrinsic.Deploy(domain, provers, creator, fee, []byte{}, 1, st)
require.NoError(t, err)
require.Len(t, st.Changeset(), 1)

View File

@ -24,7 +24,7 @@ import (
const FRAME_2_1_CUTOVER = 244200
const FRAME_2_1_EXTENDED_ENROLL_END = 255840
const FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END = FRAME_2_1_EXTENDED_ENROLL_END + 360
const FRAME_2_1_EXTENDED_ENROLL_CONFIRM_END = FRAME_2_1_EXTENDED_ENROLL_END + 1080
// used to skip frame-based checks, for tests
var BEHAVIOR_PASS = false

View File

@ -88,7 +88,7 @@ test:Amount a rdfs:Property;
`
// Deploy the token
deployState, err = tokenIntrinsic.Deploy(
deployState, _, err = tokenIntrinsic.Deploy(
token.TOKEN_BASE_DOMAIN,
nil,
[]byte("creator"),
@ -155,7 +155,7 @@ test:Amount a rdfs:Property;
var domain [32]byte
copy(domain[:], tokenAddress)
updateState, err := tokenIntrinsic.Deploy(
updateState, _, err := tokenIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -214,7 +214,7 @@ test:Amount a rdfs:Property;
var domain [32]byte
copy(domain[:], tokenAddress)
updateState, err := tokenIntrinsic.Deploy(
updateState, _, err := tokenIntrinsic.Deploy(
domain,
nil,
[]byte("updater"),
@ -251,7 +251,7 @@ test:Amount a rdfs:Property;
var domain [32]byte
copy(domain[:], tokenAddress)
updateState, err := tokenIntrinsic.Deploy(
updateState, _, err := tokenIntrinsic.Deploy(
domain,
nil,
[]byte("unauthorized"),
@ -305,7 +305,7 @@ test:Amount a rdfs:Property;
var domain [32]byte
copy(domain[:], tokenAddress)
updateState, err := tokenIntrinsic.Deploy(
updateState, _, err := tokenIntrinsic.Deploy(
domain,
nil,
[]byte("wrong-owner"),

View File

@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"slices"
"sync"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
@ -27,6 +28,7 @@ const VERTEX_DATA_DELETION_INTERVAL = 10 * 60 * 1000
var HYPERGRAPH_METADATA_ADDRESS = bytes.Repeat([]byte{0xff}, 32)
type HypergraphState struct {
mu sync.Mutex
hypergraph hypergraph.Hypergraph
changeset []state.StateChange
}
@ -47,6 +49,12 @@ func (h *HypergraphState) NewVertexAddMaterializedState(
prior *tries.VectorCommitmentTree,
data *tries.VectorCommitmentTree,
) *VertexAddMaterializedState {
if prior != nil {
prior.Commit(h.GetProver(), false)
}
if data != nil {
data.Commit(h.GetProver(), false)
}
return &VertexAddMaterializedState{
h,
appAddress,
@ -137,6 +145,9 @@ func (h *HypergraphState) NewVertexRemoveMaterializedState(
commitment []byte,
originalSize *big.Int,
) *VertexRemoveMaterializedState {
if prior != nil {
prior.Commit(h.GetProver(), false)
}
return &VertexRemoveMaterializedState{
h,
appAddress,
@ -211,6 +222,12 @@ func (h *HypergraphState) NewHyperedgeAddMaterializedState(
prior *tries.VectorCommitmentTree,
value hypergraph.Hyperedge,
) *HyperedgeAddMaterializedState {
if prior != nil {
prior.Commit(h.GetProver(), false)
}
if value != nil {
value.Commit(h.GetProver())
}
return &HyperedgeAddMaterializedState{
h,
frameNumber,
@ -275,6 +292,12 @@ func (h *HypergraphState) NewHyperedgeRemoveMaterializedState(
prior *tries.VectorCommitmentTree,
value hypergraph.Hyperedge,
) *HyperedgeRemoveMaterializedState {
if prior != nil {
prior.Commit(h.GetProver(), false)
}
if value != nil {
value.Commit(h.GetProver())
}
return &HyperedgeRemoveMaterializedState{
h,
frameNumber,
@ -588,7 +611,7 @@ func (h *HypergraphState) Init(
initializedDomain := make([]byte, 32)
copy(initializedDomain, domain)
h.mu.Lock()
h.changeset = append(h.changeset, state.StateChange{
Domain: initializedDomain,
Address: HYPERGRAPH_METADATA_ADDRESS,
@ -601,6 +624,7 @@ func (h *HypergraphState) Init(
data: publicStateInformation,
},
})
h.mu.Unlock()
return nil
}
@ -659,6 +683,7 @@ func (h *HypergraphState) Delete(
return errors.Wrap(state.ErrInvalidDiscriminator, "delete")
}
h.mu.Lock()
h.changeset = append(h.changeset, state.StateChange{
Domain: domain,
Address: address,
@ -666,6 +691,7 @@ func (h *HypergraphState) Delete(
StateChange: state.DeleteStateChangeEvent,
Value: value,
})
h.mu.Unlock()
return nil
}
@ -677,12 +703,16 @@ func (h *HypergraphState) Get(
address []byte,
discriminator []byte,
) (interface{}, error) {
h.mu.Lock()
for _, c := range slices.Backward(h.changeset) {
if bytes.Equal(c.Address, address) && bytes.Equal(c.Domain, domain) &&
bytes.Equal(c.Discriminator, discriminator) {
h.mu.Unlock()
return c.Value.DataValue(), nil
}
}
h.mu.Unlock()
id := [64]byte{}
copy(id[:32], domain)
@ -755,6 +785,7 @@ func (h *HypergraphState) Set(
stateChange = state.UpdateStateChangeEvent
}
h.mu.Lock()
h.changeset = append(h.changeset, state.StateChange{
Domain: domain,
Address: address,
@ -762,6 +793,7 @@ func (h *HypergraphState) Set(
StateChange: stateChange,
Value: value,
})
h.mu.Unlock()
return nil
}
@ -773,6 +805,8 @@ func (h *HypergraphState) Commit() error {
return errors.Wrap(err, "commit")
}
h.mu.Lock()
defer h.mu.Unlock()
for _, change := range h.changeset {
if err := change.Value.Commit(txn); err != nil {
if err := txn.Abort(); err != nil {
@ -792,7 +826,9 @@ func (h *HypergraphState) Commit() error {
// Abort implements state.State. It discards every pending state change
// accumulated in the changeset; the underlying hypergraph is untouched.
// Always returns nil.
func (h *HypergraphState) Abort() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	// Replace rather than truncate so previously handed-out slices
	// are not aliased by future appends.
	h.changeset = []state.StateChange{}
	return nil
}

View File

@ -169,11 +169,10 @@ func signatureCheckDefault() bool {
return true
}
// monitorParentProcess watches parent process and signals quit channel if
// parent dies
// monitorParentProcess watches parent process and stops the worker if parent dies
func monitorParentProcess(
parentProcessId int,
quitCh chan struct{},
stopFunc func(),
logger *zap.Logger,
) {
for {
@ -181,7 +180,9 @@ func monitorParentProcess(
proc, err := os.FindProcess(parentProcessId)
if err != nil {
logger.Error("parent process not found, terminating")
close(quitCh)
if stopFunc != nil {
stopFunc()
}
return
}
@ -190,7 +191,9 @@ func monitorParentProcess(
err := proc.Signal(syscall.Signal(0))
if err != nil {
logger.Error("parent process not found, terminating")
close(quitCh)
if stopFunc != nil {
stopFunc()
}
return
}
}
@ -471,11 +474,7 @@ func main() {
}
if *parentProcess != 0 {
go monitorParentProcess(
*parentProcess,
dataWorkerNode.GetQuitChannel(),
logger,
)
go monitorParentProcess(*parentProcess, dataWorkerNode.Stop, logger)
}
done := make(chan os.Signal, 1)
@ -720,6 +719,20 @@ func printPeerInfo(logger *zap.Logger, cfg *config.Config) {
printReachability(p.Reachability)
printCapabilities(p.Capabilities)
if p.LastReceivedFrame != 0 {
fmt.Printf(
" Last Received Global Frame: %d\n",
p.LastReceivedFrame,
)
}
if p.LastGlobalHeadFrame != 0 {
fmt.Printf(
" Last Global Head Frame: %d\n",
p.LastGlobalHeadFrame,
)
}
if len(p.PublicKey) > 0 {
fmt.Println(" Public Key:", hex.EncodeToString(p.PublicKey))
}

View File

@ -1,7 +1,6 @@
package p2p
import (
"bytes"
"sync"
"time"
@ -16,19 +15,17 @@ type InMemoryPeerInfoManager struct {
peerInfoCh chan *protobufs.PeerInfo
peerInfoMx sync.RWMutex
peerMap map[string]*p2p.PeerInfo
fastestPeers []*p2p.PeerInfo
ctx lifecycle.SignalerContext
peerMap map[string]*p2p.PeerInfo
ctx lifecycle.SignalerContext
}
var _ p2p.PeerInfoManager = (*InMemoryPeerInfoManager)(nil)
func NewInMemoryPeerInfoManager(logger *zap.Logger) *InMemoryPeerInfoManager {
return &InMemoryPeerInfoManager{
logger: logger,
peerInfoCh: make(chan *protobufs.PeerInfo, 1000),
fastestPeers: []*p2p.PeerInfo{},
peerMap: make(map[string]*p2p.PeerInfo),
logger: logger,
peerInfoCh: make(chan *protobufs.PeerInfo, 1000),
peerMap: make(map[string]*p2p.PeerInfo),
}
}
@ -59,25 +56,16 @@ func (m *InMemoryPeerInfoManager) Start(
seen := time.Now().UnixMilli()
m.peerInfoMx.Lock()
m.peerMap[string(info.PeerId)] = &p2p.PeerInfo{
PeerId: info.PeerId,
Bandwidth: 100,
Capabilities: capabilities,
Reachability: reachability,
Cores: uint32(len(reachability)),
LastSeen: seen,
Version: info.Version,
PatchNumber: info.PatchNumber,
PeerId: info.PeerId,
Capabilities: capabilities,
Reachability: reachability,
Cores: uint32(len(reachability)),
LastSeen: seen,
Version: info.Version,
PatchNumber: info.PatchNumber,
LastReceivedFrame: info.LastReceivedFrame,
LastGlobalHeadFrame: info.LastGlobalHeadFrame,
}
m.searchAndInsertPeer(&p2p.PeerInfo{
PeerId: info.PeerId,
Bandwidth: 100,
Capabilities: capabilities,
Reachability: reachability,
Cores: uint32(len(reachability)),
LastSeen: seen,
Version: info.Version,
PatchNumber: info.PatchNumber,
})
m.peerInfoMx.Unlock()
case <-ctx.Done():
return
@ -116,33 +104,9 @@ func (m *InMemoryPeerInfoManager) GetPeerMap() map[string]*p2p.PeerInfo {
func (m *InMemoryPeerInfoManager) GetPeersBySpeed() [][]byte {
result := [][]byte{}
m.peerInfoMx.RLock()
for _, info := range m.fastestPeers {
for _, info := range m.peerMap {
result = append(result, info.PeerId)
}
m.peerInfoMx.RUnlock()
return result
}
// blatantly lifted from slices.BinarySearchFunc, optimized for direct insertion
// and uint64 comparison without overflow
func (m *InMemoryPeerInfoManager) searchAndInsertPeer(info *p2p.PeerInfo) {
n := len(m.fastestPeers)
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1)
if m.fastestPeers[h].Bandwidth > info.Bandwidth {
i = h + 1
} else {
j = h
}
}
if i < n && m.fastestPeers[i].Bandwidth == info.Bandwidth &&
bytes.Equal(m.fastestPeers[i].PeerId, info.PeerId) {
m.fastestPeers[i] = info
} else {
m.fastestPeers = append(m.fastestPeers, new(p2p.PeerInfo))
copy(m.fastestPeers[i+1:], m.fastestPeers[i:])
m.fastestPeers[i] = info
}
}

View File

@ -160,12 +160,14 @@ func (r *RPCServer) GetPeerInfo(
})
}
out = append(out, &protobufs.PeerInfo{
PeerId: pi.PeerId,
Reachability: re,
Timestamp: pi.LastSeen,
Capabilities: cs,
Version: pi.Version,
PatchNumber: pi.PatchNumber,
PeerId: pi.PeerId,
Reachability: re,
Timestamp: pi.LastSeen,
Capabilities: cs,
Version: pi.Version,
PatchNumber: pi.PatchNumber,
LastReceivedFrame: pi.LastReceivedFrame,
LastGlobalHeadFrame: pi.LastGlobalHeadFrame,
})
}

View File

@ -695,7 +695,10 @@ func (c *PubSubProxyClient) handleValidationRequests(ctx context.Context) {
// Ensure PubSubProxyClient implements p2p.PubSub
var _ p2p.PubSub = (*PubSubProxyClient)(nil)
func (c *PubSubProxyClient) PublishToBitmask(bitmask []byte, data []byte) error {
func (c *PubSubProxyClient) PublishToBitmask(
bitmask []byte,
data []byte,
) error {
_, err := c.client.PublishToBitmask(
context.Background(),
&protobufs.PublishToBitmaskRequest{

View File

@ -27,6 +27,8 @@ var pebbleMigrations = []func(*pebble.Batch) error{
migration_2_1_0_8,
migration_2_1_0_81,
migration_2_1_0_10,
migration_2_1_0_10,
migration_2_1_0_11,
}
func NewPebbleDB(
@ -464,3 +466,7 @@ func migration_2_1_0_10(b *pebble.Batch) error {
// nodes are consistent
return nil
}
// migration_2_1_0_11 is an intentional no-op migration step: it performs no
// writes on the batch and always returns nil.
// NOTE(review): presumably it exists only to advance the recorded migration
// count in pebbleMigrations so nodes mark this version as migrated — confirm
// against the migration runner in NewPebbleDB.
func migration_2_1_0_11(b *pebble.Batch) error {
	return nil
}

View File

@ -306,7 +306,36 @@ func (w *WorkerManager) registerWorker(info *typesStore.WorkerInfo) error {
return errors.New("worker manager not started")
}
w.logger.Info("registering worker",
existing, err := w.store.GetWorker(info.CoreId)
creating := false
if err != nil {
if errors.Is(err, store.ErrNotFound) {
creating = true
} else {
workerOperationsTotal.WithLabelValues("register", "error").Inc()
return errors.Wrap(err, "register worker")
}
}
if !creating {
if info.ListenMultiaddr == "" {
info.ListenMultiaddr = existing.ListenMultiaddr
}
if info.StreamListenMultiaddr == "" {
info.StreamListenMultiaddr = existing.StreamListenMultiaddr
}
if info.TotalStorage == 0 {
info.TotalStorage = existing.TotalStorage
}
info.Automatic = existing.Automatic
}
logMsg := "registering worker"
if !creating {
logMsg = "updating worker"
}
w.logger.Info(logMsg,
zap.Uint("core_id", info.CoreId),
zap.String("listen_addr", info.ListenMultiaddr),
zap.Uint("total_storage", info.TotalStorage),
@ -335,12 +364,21 @@ func (w *WorkerManager) registerWorker(info *typesStore.WorkerInfo) error {
w.setWorkerFilterMapping(info.CoreId, info.Filter)
// Update metrics
activeWorkersGauge.Inc()
totalStorageGauge.Add(float64(info.TotalStorage))
if creating {
activeWorkersGauge.Inc()
totalStorageGauge.Add(float64(info.TotalStorage))
} else if existing != nil && info.TotalStorage != existing.TotalStorage {
delta := float64(int64(info.TotalStorage) - int64(existing.TotalStorage))
totalStorageGauge.Add(delta)
}
workerOperationsTotal.WithLabelValues("register", "success").Inc()
msg := "worker registered successfully"
if !creating {
msg = "worker updated successfully"
}
w.logger.Info(
"worker registered successfully",
msg,
zap.Uint("core_id", info.CoreId),
)
@ -463,8 +501,9 @@ func (w *WorkerManager) DeallocateWorker(coreId uint) error {
)
}
// Update allocation status
// Update allocation status and clear filter
worker.Allocated = false
worker.Filter = nil
// Save to store
txn, err := w.store.NewTransaction(false)
@ -484,14 +523,15 @@ func (w *WorkerManager) DeallocateWorker(coreId uint) error {
return errors.Wrap(err, "deallocate worker")
}
// Update cache
w.setWorkerFilterMapping(coreId, nil)
w.setWorkerAllocation(coreId, false)
// Refresh worker
if err := w.respawnWorker(coreId, []byte{}); err != nil {
return errors.Wrap(err, "allocate worker")
}
// Mark as deallocated in cache
w.setWorkerAllocation(coreId, false)
// Update metrics
allocatedWorkersGauge.Dec()
workerOperationsTotal.WithLabelValues("deallocate", "success").Inc()
@ -809,7 +849,7 @@ func (w *WorkerManager) ensureWorkerRegistered(
if err == nil {
return nil
}
if err != nil && !errors.Is(err, store.ErrNotFound) {
if !errors.Is(err, store.ErrNotFound) {
return err
}

View File

@ -49,6 +49,14 @@ func (g *QuorumCertificate) Identity() models.Identity {
// Source implements models.TimeoutCertificate.
func (g *TimeoutCertificate) Equals(other models.TimeoutCertificate) bool {
if other == nil {
return false
}
if t, ok := other.(*TimeoutCertificate); !ok || t == nil {
return false
}
return bytes.Equal(g.Filter, other.GetFilter()) &&
g.Rank == other.GetRank() &&
slices.Equal(g.LatestRanks, other.GetLatestRanks()) &&

View File

@ -174,6 +174,28 @@ func (p *PeerInfo) ToCanonicalBytes() ([]byte, error) {
return nil, errors.Wrap(err, "to canonical bytes")
}
// Write last_received_frame
if p.LastReceivedFrame != 0 {
if err := binary.Write(
buf,
binary.BigEndian,
p.LastReceivedFrame,
); err != nil {
return nil, errors.Wrap(err, "to canonical bytes")
}
}
// Write last_global_head_frame
if p.LastGlobalHeadFrame != 0 {
if err := binary.Write(
buf,
binary.BigEndian,
p.LastGlobalHeadFrame,
); err != nil {
return nil, errors.Wrap(err, "to canonical bytes")
}
}
return buf.Bytes(), nil
}
@ -336,6 +358,24 @@ func (p *PeerInfo) FromCanonicalBytes(data []byte) error {
return errors.Wrap(err, "from canonical bytes")
}
// Read last_received_frame
if err := binary.Read(
buf,
binary.BigEndian,
&p.LastReceivedFrame,
); err != nil {
return nil
}
// Read last_global_head_frame
if err := binary.Read(
buf,
binary.BigEndian,
&p.LastGlobalHeadFrame,
); err != nil {
return nil
}
return nil
}

View File

@ -223,6 +223,10 @@ type PeerInfo struct {
PublicKey []byte `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
// The signature of the node.
Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"`
// The last global frame received by the node.
LastReceivedFrame uint64 `protobuf:"varint,9,opt,name=last_received_frame,json=lastReceivedFrame,proto3" json:"last_received_frame,omitempty"`
// The last global frame advanced to the head of the time reel.
LastGlobalHeadFrame uint64 `protobuf:"varint,10,opt,name=last_global_head_frame,json=lastGlobalHeadFrame,proto3" json:"last_global_head_frame,omitempty"`
}
func (x *PeerInfo) Reset() {
@ -313,6 +317,20 @@ func (x *PeerInfo) GetSignature() []byte {
return nil
}
// GetLastReceivedFrame returns the last global frame received by the node,
// or 0 when the receiver is nil.
func (x *PeerInfo) GetLastReceivedFrame() uint64 {
	if x == nil {
		return 0
	}
	return x.LastReceivedFrame
}
// GetLastGlobalHeadFrame returns the last global frame advanced to the head
// of the time reel, or 0 when the receiver is nil.
func (x *PeerInfo) GetLastGlobalHeadFrame() uint64 {
	if x == nil {
		return 0
	}
	return x.LastGlobalHeadFrame
}
type PeerInfoResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -365,13 +383,15 @@ type NodeInfoResponse struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
PeerScore uint64 `protobuf:"varint,2,opt,name=peer_score,json=peerScore,proto3" json:"peer_score,omitempty"`
Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
PeerSeniority []byte `protobuf:"bytes,4,opt,name=peer_seniority,json=peerSeniority,proto3" json:"peer_seniority,omitempty"`
RunningWorkers uint32 `protobuf:"varint,5,opt,name=running_workers,json=runningWorkers,proto3" json:"running_workers,omitempty"`
AllocatedWorkers uint32 `protobuf:"varint,6,opt,name=allocated_workers,json=allocatedWorkers,proto3" json:"allocated_workers,omitempty"`
PatchNumber []byte `protobuf:"bytes,7,opt,name=patch_number,json=patchNumber,proto3" json:"patch_number,omitempty"`
PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
PeerScore uint64 `protobuf:"varint,2,opt,name=peer_score,json=peerScore,proto3" json:"peer_score,omitempty"`
Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
PeerSeniority []byte `protobuf:"bytes,4,opt,name=peer_seniority,json=peerSeniority,proto3" json:"peer_seniority,omitempty"`
RunningWorkers uint32 `protobuf:"varint,5,opt,name=running_workers,json=runningWorkers,proto3" json:"running_workers,omitempty"`
AllocatedWorkers uint32 `protobuf:"varint,6,opt,name=allocated_workers,json=allocatedWorkers,proto3" json:"allocated_workers,omitempty"`
PatchNumber []byte `protobuf:"bytes,7,opt,name=patch_number,json=patchNumber,proto3" json:"patch_number,omitempty"`
LastReceivedFrame uint64 `protobuf:"varint,8,opt,name=last_received_frame,json=lastReceivedFrame,proto3" json:"last_received_frame,omitempty"`
LastGlobalHeadFrame uint64 `protobuf:"varint,9,opt,name=last_global_head_frame,json=lastGlobalHeadFrame,proto3" json:"last_global_head_frame,omitempty"`
}
func (x *NodeInfoResponse) Reset() {
@ -455,6 +475,20 @@ func (x *NodeInfoResponse) GetPatchNumber() []byte {
return nil
}
// GetLastReceivedFrame returns the node's last received global frame,
// or 0 when the receiver is nil.
func (x *NodeInfoResponse) GetLastReceivedFrame() uint64 {
	if x == nil {
		return 0
	}
	return x.LastReceivedFrame
}
// GetLastGlobalHeadFrame returns the node's last global head frame,
// or 0 when the receiver is nil.
func (x *NodeInfoResponse) GetLastGlobalHeadFrame() uint64 {
	if x == nil {
		return 0
	}
	return x.LastGlobalHeadFrame
}
type WorkerInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -1929,7 +1963,7 @@ var file_node_proto_rawDesc = []byte{
0x75, 0x62, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11,
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72,
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d,
0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0xcf, 0x02, 0x0a, 0x08, 0x50, 0x65,
0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0xb4, 0x03, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12,
0x49, 0x0a, 0x0c, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18,
@ -1950,293 +1984,305 @@ var file_node_proto_rawDesc = []byte{
0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a,
0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x52, 0x0a, 0x10, 0x50,
0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x3e, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65,
0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22,
0x84, 0x02, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a,
0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x04, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73,
0x65, 0x6e, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
0x70, 0x65, 0x65, 0x72, 0x53, 0x65, 0x6e, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x27, 0x0a,
0x0f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73,
0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x57,
0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61,
0x74, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b,
0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, 0x6d,
0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x63, 0x68,
0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x8f, 0x01, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65,
0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x16,
0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06,
0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x04, 0x52, 0x10, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72,
0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61,
0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x5a, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b,
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44,
0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f,
0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72,
0x49, 0x6e, 0x66, 0x6f, 0x22, 0x6e, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69,
0x74, 0x79, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
0x69, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x22, 0x2f, 0x0a, 0x09, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03,
0x72, 0x65, 0x66, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x41, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
0x12, 0x36, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22,
0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b,
0x65, 0x79, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x4f, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x69,
0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65,
0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12,
0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, 0x0c, 0x44, 0x65, 0x6c,
0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75,
0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68,
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22,
0xdd, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x42, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61,
0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64,
0x6c, 0x65, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x61,
0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f,
0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69,
0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74,
0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22,
0x5a, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61,
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62,
0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64,
0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x30, 0x0a, 0x14, 0x4f,
0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x68, 0x0a,
0x0f, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69,
0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xce, 0x01, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x5e, 0x0a, 0x12, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e,
0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x72, 0x69,
0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65,
0x66, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x55, 0x0a, 0x10, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63,
0x69, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f,
0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6c, 0x69,
0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6d,
0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x09, 0x0a,
0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x04, 0x43, 0x6f, 0x69, 0x6e,
0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65,
0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c,
0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x05,
0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75,
0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f,
0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66,
0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x7c, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63,
0x79, 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x31, 0x0a, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f,
0x69, 0x6e, 0x52, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d,
0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x88, 0x03, 0x0a, 0x17, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69,
0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72,
0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12,
0x20, 0x0a, 0x0c, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65,
0x79, 0x12, 0x29, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x76, 0x65, 0x72,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c,
0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12,
0x12, 0x0a, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6d,
0x61, 0x73, 0x6b, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66,
0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b,
0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79,
0x22, 0x8c, 0x06, 0x0a, 0x1e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65,
0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a,
0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21,
0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65,
0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e,
0x74, 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x6f, 0x4f, 0x6e,
0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75,
0x6e, 0x64, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4f, 0x6e, 0x65,
0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x5f, 0x76, 0x65,
0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x6f, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x66, 0x75, 0x6e,
0x64, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64,
0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12,
0x26, 0x0a, 0x0f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e,
0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x6f, 0x43, 0x6f, 0x69, 0x6e,
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e,
0x64, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0a,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x69, 0x6e,
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6f, 0x5f, 0x6d, 0x61,
0x73, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x4d, 0x61, 0x73, 0x6b,
0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4d, 0x61, 0x73,
0x6b, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x15, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, 0x6f, 0x5f,
0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72,
0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18,
0x74, 0x6f, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65,
0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b, 0x72, 0x65, 0x66, 0x75,
0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65,
0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x72,
0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x1f, 0x72, 0x65, 0x66, 0x75,
0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65,
0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x1c, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12,
0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20,
0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
0x4d, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xa6,
0x02, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a,
0x0c, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65,
0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x0b, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79,
0x43, 0x6f, 0x69, 0x6e, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x71, 0x75,
0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f,
0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74,
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6a, 0x0a, 0x14, 0x70,
0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64,
0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73,
0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x28, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x61,
0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
0x72, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a,
0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x1e, 0x0a,
0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x10, 0x0a,
0x03, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x69, 0x64, 0x73, 0x12,
0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x64,
0x65, 0x78, 0x22, 0x35, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a,
0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9c, 0x04, 0x0a, 0x0b, 0x4e, 0x6f,
0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74,
0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69,
0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e,
0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64,
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f,
0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x6c,
0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x61,
0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65,
0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6c,
0x61, 0x73, 0x74, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x5f,
0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6c, 0x61, 0x73,
0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x48, 0x65, 0x61, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65,
0x22, 0x52, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66,
0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70,
0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x24, 0x2e, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e,
0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x12, 0x47, 0x65, 0x74,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12,
0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b,
0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65,
0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xe4, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74,
0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x52,
0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62,
0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77,
0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0f, 0x43, 0x72, 0x65,
0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2f, 0x2e, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69,
0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e,
0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72,
0x49, 0x6e, 0x66, 0x6f, 0x22, 0xe9, 0x02, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72,
0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x53, 0x63, 0x6f, 0x72,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x6e, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x53, 0x65, 0x6e, 0x69, 0x6f, 0x72, 0x69,
0x74, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x6f,
0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x72, 0x75, 0x6e,
0x6e, 0x69, 0x6e, 0x67, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61,
0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73,
0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65,
0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x63,
0x68, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b,
0x70, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x6c,
0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x61,
0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65,
0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6c,
0x61, 0x73, 0x74, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x5f,
0x66, 0x72, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6c, 0x61, 0x73,
0x74, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x48, 0x65, 0x61, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65,
0x22, 0x8f, 0x01, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x17, 0x0a, 0x07, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
0x52, 0x06, 0x63, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74,
0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
0x12, 0x2b, 0x0a, 0x11, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x61, 0x76, 0x61,
0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a,
0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61,
0x67, 0x65, 0x22, 0x5a, 0x0a, 0x12, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b,
0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f,
0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x6e,
0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2f, 0x0a, 0x13,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x63, 0x6f, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2f, 0x0a,
0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2f,
0x0a, 0x09, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x72,
0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22,
0x41, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x6b, 0x65,
0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x70, 0x62, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6b, 0x65,
0x79, 0x73, 0x22, 0x4f, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65,
0x74, 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79,
0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, 0x6c,
0x69, 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72,
0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44,
0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18,
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69,
0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e,
0x70, 0x62, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xdd, 0x01, 0x0a, 0x0b, 0x53, 0x65,
0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d,
0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69,
0x6e, 0x12, 0x42, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x70, 0x62, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x07, 0x72, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x61,
0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a,
0x0d, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44,
0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c,
0x69, 0x76, 0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x5a, 0x0a, 0x0c, 0x53, 0x65, 0x6e,
0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x64, 0x65, 0x6c,
0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x25, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f,
0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x69, 0x76,
0x65, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72,
0x79, 0x44, 0x61, 0x74, 0x61, 0x22, 0x30, 0x0a, 0x14, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61,
0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a,
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x68, 0x0a, 0x0f, 0x49, 0x6d, 0x70, 0x6c, 0x69,
0x63, 0x69, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6d,
0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d,
0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69,
0x6e, 0x22, 0xce, 0x01, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66,
0x12, 0x5e, 0x0a, 0x12, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65,
0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x48, 0x00, 0x52, 0x11, 0x6f,
0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x55, 0x0a, 0x10, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x71, 0x75, 0x69,
0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x41, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74,
0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x04, 0x43, 0x6f, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75,
0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73,
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18,
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69,
0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e,
0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65,
0x72, 0x22, 0x7c, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x12,
0x31, 0x0a, 0x04, 0x63, 0x6f, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x04, 0x63, 0x6f,
0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62,
0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e,
0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22,
0x88, 0x03, 0x0a, 0x17, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64,
0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x61, 0x6c,
0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x77, 0x42,
0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f,
0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x72,
0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d,
0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63,
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x6f, 0x6e, 0x65,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x0a, 0x6f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x76,
0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18,
0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62,
0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f,
0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x61, 0x73,
0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x31, 0x0a,
0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65,
0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x61, 0x64, 0x64,
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
0x12, 0x38, 0x0a, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x16, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65,
0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x8c, 0x06, 0x0a, 0x1e, 0x4d,
0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69,
0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x77, 0x5f, 0x62,
0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61,
0x77, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x61, 0x6d,
0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x66, 0x72, 0x61, 0x6d, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0f, 0x74,
0x6f, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x6f, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b,
0x65, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6f, 0x6e, 0x65,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x10, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4f, 0x6e, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4b, 0x65,
0x79, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11,
0x74, 0x6f, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65,
0x79, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x15, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x6f, 0x5f,
0x63, 0x6f, 0x69, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x0d, 0x74, 0x6f, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63,
0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x69, 0x6e,
0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11,
0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63,
0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6f, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0b, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65,
0x66, 0x75, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x0a, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x74,
0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66,
0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x6f,
0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65,
0x6e, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6b,
0x65, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b,
0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64,
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x41,
0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e,
0x63, 0x65, 0x12, 0x45, 0x0a, 0x1f, 0x72, 0x65, 0x66, 0x75, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64,
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x72, 0x65, 0x66,
0x75, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x66,
0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70,
0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65,
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4d, 0x0a, 0x19, 0x47, 0x65, 0x74,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xa6, 0x02, 0x0a, 0x1a, 0x47, 0x65, 0x74,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6c, 0x65, 0x67, 0x61, 0x63,
0x79, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f,
0x69, 0x6e, 0x52, 0x0b, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x6f, 0x69, 0x6e, 0x73, 0x12,
0x54, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69,
0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e,
0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e,
0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6a, 0x0a, 0x14, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d,
0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x61,
0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e,
0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x70, 0x65,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x22, 0x28, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x52,
0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b,
0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f,
0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61,
0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x68,
0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69,
0x63, 0x75, 0x6c, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x69, 0x66,
0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x03,
0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f,
0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x0b, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x35, 0x0a, 0x17,
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x32, 0x9c, 0x04, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74,
0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64,
0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0b, 0x47, 0x65,
0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62,
0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x12, 0x6b, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x12, 0x2d, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74,
0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x2b, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b,
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53,
0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x24, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62,
0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x71,
0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73,
0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x79, 0x41,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e,
0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
0x73, 0x42, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x32, 0xe4, 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5c, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e,
0x12, 0x27, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f,
0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61,
0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65,
0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69,
0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72,
0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x62,
0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62,
0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70,
0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f,
0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -41,6 +41,10 @@ message PeerInfo {
bytes public_key = 7;
// The signature of the node.
bytes signature = 8;
// The last global frame received by the node.
uint64 last_received_frame = 9;
// The last global frame advanced to the head of the time reel.
uint64 last_global_head_frame = 10;
}
message PeerInfoResponse {
@ -55,6 +59,8 @@ message NodeInfoResponse {
uint32 running_workers = 5;
uint32 allocated_workers = 6;
bytes patch_number = 7;
uint64 last_received_frame = 8;
uint64 last_global_head_frame = 9;
}
message WorkerInfo {

View File

@ -24,7 +24,7 @@ type Intrinsic interface {
contextData []byte,
frameNumber uint64,
state state.State,
) (state.State, error)
) (state.State, []byte, error)
// Locks addresses for writing or reading
Lock(frameNumber uint64, input []byte) ([][]byte, error)
// Unlocks addresses for writing or reading

View File

@ -25,12 +25,13 @@ type Capability struct {
}
type PeerInfo struct {
PeerId []byte
Cores uint32
Capabilities []Capability
Reachability []Reachability
Bandwidth uint64
LastSeen int64
Version []byte
PatchNumber []byte
PeerId []byte
Cores uint32
Capabilities []Capability
Reachability []Reachability
LastSeen int64
Version []byte
PatchNumber []byte
LastReceivedFrame uint64
LastGlobalHeadFrame uint64
}