From 4b61a000959cb2bf072e18e39f062384a184a677 Mon Sep 17 00:00:00 2001 From: Cassandra Heart Date: Thu, 31 Oct 2024 23:33:46 -0500 Subject: [PATCH] restore prover rings --- go-libp2p-blossomsub/blossomsub.go | 10 +- node/config/config.go | 2 +- node/config/version.go | 8 +- .../data/data_clock_consensus_engine.go | 93 +++-- node/consensus/data/main_data_loop.go | 267 ++++++------ node/consensus/data/message_handler.go | 379 +++++++++--------- node/consensus/data/peer_messaging.go | 2 +- .../token/application/token_handle_mint.go | 2 +- node/p2p/blossomsub.go | 2 +- 9 files changed, 405 insertions(+), 360 deletions(-) diff --git a/go-libp2p-blossomsub/blossomsub.go b/go-libp2p-blossomsub/blossomsub.go index 6540ec2..94017d4 100644 --- a/go-libp2p-blossomsub/blossomsub.go +++ b/go-libp2p-blossomsub/blossomsub.go @@ -214,7 +214,13 @@ func NewBlossomSubWithRouter(ctx context.Context, h host.Host, rt PubSubRouter, } // NewBlossomSubRouter returns a new BlossomSubRouter with custom parameters. -func NewBlossomSubRouter(h host.Host, params BlossomSubParams) *BlossomSubRouter { +func NewBlossomSubRouter(h host.Host, params BlossomSubParams, network uint8) *BlossomSubRouter { + if network != 0 { + BlossomSubDefaultProtocols[0] = protocol.ID( + string(BlossomSubID_v2) + fmt.Sprintf("-network-%d", network), + ) + } + return &BlossomSubRouter{ peers: make(map[peer.ID]protocol.ID), mesh: make(map[string]map[peer.ID]struct{}), @@ -233,6 +239,7 @@ func NewBlossomSubRouter(h host.Host, params BlossomSubParams) *BlossomSubRouter feature: BlossomSubDefaultFeatures, tagTracer: newTagTracer(h.ConnManager()), params: params, + network: network, } } @@ -453,6 +460,7 @@ type BlossomSubRouter struct { connect chan connectInfo // px connection requests cab peerstore.AddrBook meshMx sync.RWMutex + network uint8 protos []protocol.ID feature BlossomSubFeatureTest diff --git a/node/config/config.go b/node/config/config.go index 8ad8df6..25dbddf 100644 --- a/node/config/config.go +++ b/node/config/config.go @@ -137,7 +137,7 @@ var unlock *SignedGenesisUnlock func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) { if network != 0 { unlock = &SignedGenesisUnlock{ - GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c000000000000000000000005", + GenesisSeedHex: "726573697374206d7563682c206f626579206c6974746c657c000000000000000000000006", Beacon: []byte{ 0x58, 0xef, 0xd9, 0x7e, 0xdd, 0x0e, 0xb6, 0x2f, 0x51, 0xc7, 0x5d, 0x00, 0x29, 0x12, 0x45, 0x49, diff --git a/node/config/version.go b/node/config/version.go index 7242252..5910de1 100644 --- a/node/config/version.go +++ b/node/config/version.go @@ -6,15 +6,15 @@ import ( ) func GetMinimumVersionCutoff() time.Time { - return time.Date(2024, time.October, 24, 11, 0, 0, 0, time.UTC) + return time.Date(2024, time.November, 2, 0, 0, 0, 0, time.UTC) } func GetMinimumVersion() []byte { - return []byte{0x02, 0x00, 0x02} + return []byte{0x02, 0x00, 0x03} } func GetVersion() []byte { - return []byte{0x02, 0x00, 0x02} + return []byte{0x02, 0x00, 0x03} } func GetVersionString() string { @@ -36,7 +36,7 @@ func FormatVersion(version []byte) string { } func GetPatchNumber() byte { - return 0x04 + return 0x00 } func GetRCNumber() byte { diff --git a/node/consensus/data/data_clock_consensus_engine.go b/node/consensus/data/data_clock_consensus_engine.go index 4f69024..571340a 100644 --- a/node/consensus/data/data_clock_consensus_engine.go +++ b/node/consensus/data/data_clock_consensus_engine.go @@ -1,6 +1,7 @@ package data import ( + "bytes" "context" 
"crypto" "encoding/binary" @@ -93,24 +94,24 @@ type DataClockConsensusEngine struct { currentReceivingSyncPeersMx sync.Mutex currentReceivingSyncPeers int - frameChan chan *protobufs.ClockFrame - executionEngines map[string]execution.ExecutionEngine - filter []byte - input []byte - parentSelector []byte - syncingStatus SyncStatusType - syncingTarget []byte - previousHead *protobufs.ClockFrame - engineMx sync.Mutex - dependencyMapMx sync.Mutex - stagedTransactions *protobufs.TokenRequests - stagedTransactionsMx sync.Mutex - peerMapMx sync.RWMutex - peerAnnounceMapMx sync.Mutex - // proverTrieJoinRequests map[string]string - // proverTrieLeaveRequests map[string]string - // proverTriePauseRequests map[string]string - // proverTrieResumeRequests map[string]string + frameChan chan *protobufs.ClockFrame + executionEngines map[string]execution.ExecutionEngine + filter []byte + input []byte + parentSelector []byte + syncingStatus SyncStatusType + syncingTarget []byte + previousHead *protobufs.ClockFrame + engineMx sync.Mutex + dependencyMapMx sync.Mutex + stagedTransactions *protobufs.TokenRequests + stagedTransactionsMx sync.Mutex + peerMapMx sync.RWMutex + peerAnnounceMapMx sync.Mutex + proverTrieJoinRequests map[string]string + proverTrieLeaveRequests map[string]string + proverTriePauseRequests map[string]string + proverTrieResumeRequests map[string]string proverTrieRequestsMx sync.Mutex lastKeyBundleAnnouncementFrame uint64 peerSeniority *peerSeniority @@ -615,24 +616,24 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error { e.state = consensus.EngineStateStopping errChan := make(chan error) - // msg := []byte("pause") - // msg = binary.BigEndian.AppendUint64(msg, e.GetFrame().FrameNumber) - // msg = append(msg, e.filter...) - // sig, err := e.pubSub.SignMessage(msg) - // if err != nil { - // panic(err) - // } + msg := []byte("pause") + msg = binary.BigEndian.AppendUint64(msg, e.GetFrame().FrameNumber) + msg = append(msg, e.filter...) + sig, err := e.pubSub.SignMessage(msg) + if err != nil { + panic(err) + } - // e.publishMessage(e.filter, &protobufs.AnnounceProverPause{ - // Filter: e.filter, - // FrameNumber: e.GetFrame().FrameNumber, - // PublicKeySignatureEd448: &protobufs.Ed448Signature{ - // PublicKey: &protobufs.Ed448PublicKey{ - // KeyValue: e.pubSub.GetPublicKey(), - // }, - // Signature: sig, - // }, - // }) + e.publishMessage(e.filter, &protobufs.AnnounceProverPause{ + Filter: e.filter, + FrameNumber: e.GetFrame().FrameNumber, + PublicKeySignatureEd448: &protobufs.Ed448Signature{ + PublicKey: &protobufs.Ed448PublicKey{ + KeyValue: e.pubSub.GetPublicKey(), + }, + Signature: sig, + }, + }) wg := sync.WaitGroup{} wg.Add(len(e.executionEngines)) @@ -961,3 +962,25 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr( ) return clients, nil } + +func (e *DataClockConsensusEngine) announceProverJoin() { + msg := []byte("join") + head, _ := e.dataTimeReel.Head() + msg = binary.BigEndian.AppendUint64(msg, head.FrameNumber) + msg = append(msg, bytes.Repeat([]byte{0xff}, 32)...) 
+  sig, err := e.pubSub.SignMessage(msg)
+  if err != nil {
+    panic(err)
+  }
+
+  e.publishMessage(e.filter, &protobufs.AnnounceProverJoin{
+    Filter:      bytes.Repeat([]byte{0xff}, 32),
+    FrameNumber: head.FrameNumber,
+    PublicKeySignatureEd448: &protobufs.Ed448Signature{
+      Signature: sig,
+      PublicKey: &protobufs.Ed448PublicKey{
+        KeyValue: e.pubSub.GetPublicKey(),
+      },
+    },
+  })
+}
diff --git a/node/consensus/data/main_data_loop.go b/node/consensus/data/main_data_loop.go
index 55064b1..81de10d 100644
--- a/node/consensus/data/main_data_loop.go
+++ b/node/consensus/data/main_data_loop.go
@@ -3,6 +3,7 @@ package data
 import (
   "bytes"
   "crypto/rand"
+  "slices"
   "time"
 
   "go.uber.org/zap"
@@ -92,72 +93,72 @@ func (e *DataClockConsensusEngine) runLoop() {
         continue
       }
 
-      // e.proverTrieRequestsMx.Lock()
-      // joinAddrs := tries.NewMinHeap[peerSeniorityItem]()
-      // leaveAddrs := tries.NewMinHeap[peerSeniorityItem]()
-      // for _, addr := range e.proverTrieJoinRequests {
-      //   if _, ok := (*e.peerSeniority)[addr]; !ok {
-      //     joinAddrs.Push(peerSeniorityItem{
-      //       addr:      addr,
-      //       seniority: 0,
-      //     })
-      //   } else {
-      //     joinAddrs.Push((*e.peerSeniority)[addr])
-      //   }
-      // }
-      // for _, addr := range e.proverTrieLeaveRequests {
-      //   if _, ok := (*e.peerSeniority)[addr]; !ok {
-      //     leaveAddrs.Push(peerSeniorityItem{
-      //       addr:      addr,
-      //       seniority: 0,
-      //     })
-      //   } else {
-      //     leaveAddrs.Push((*e.peerSeniority)[addr])
-      //   }
-      // }
-      // for _, addr := range e.proverTrieResumeRequests {
-      //   if _, ok := e.proverTriePauseRequests[addr]; ok {
-      //     delete(e.proverTriePauseRequests, addr)
-      //   }
-      // }
+      e.proverTrieRequestsMx.Lock()
+      joinAddrs := tries.NewMinHeap[peerSeniorityItem]()
+      leaveAddrs := tries.NewMinHeap[peerSeniorityItem]()
+      for _, addr := range e.proverTrieJoinRequests {
+        if _, ok := (*e.peerSeniority)[addr]; !ok {
+          joinAddrs.Push(peerSeniorityItem{
+            addr:      addr,
+            seniority: 0,
+          })
+        } else {
+          joinAddrs.Push((*e.peerSeniority)[addr])
+        }
+      }
+      for _, addr := range e.proverTrieLeaveRequests {
+        if _, ok := (*e.peerSeniority)[addr]; !ok {
+          leaveAddrs.Push(peerSeniorityItem{
+            addr:      addr,
+            seniority: 0,
+          })
+        } else {
+          leaveAddrs.Push((*e.peerSeniority)[addr])
+        }
+      }
+      for _, addr := range e.proverTrieResumeRequests {
+        if _, ok := e.proverTriePauseRequests[addr]; ok {
+          delete(e.proverTriePauseRequests, addr)
+        }
+      }
 
-      // joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
-      // copy(joinReqs, joinAddrs.All())
-      // slices.Reverse(joinReqs)
-      // leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
-      // copy(leaveReqs, leaveAddrs.All())
-      // slices.Reverse(leaveReqs)
+      joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
+      copy(joinReqs, joinAddrs.All())
+      slices.Reverse(joinReqs)
+      leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
+      copy(leaveReqs, leaveAddrs.All())
+      slices.Reverse(leaveReqs)
 
-      // e.proverTrieJoinRequests = make(map[string]string)
-      // e.proverTrieLeaveRequests = make(map[string]string)
-      // e.proverTrieRequestsMx.Unlock()
+      e.proverTrieJoinRequests = make(map[string]string)
+      e.proverTrieLeaveRequests = make(map[string]string)
+      e.proverTrieRequestsMx.Unlock()
 
-      // e.frameProverTriesMx.Lock()
-      // for _, addr := range joinReqs {
-      //   rings := len(e.frameProverTries)
-      //   last := e.frameProverTries[rings-1]
-      //   set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
-      //   if len(set) == 1024 {
-      //     e.frameProverTries = append(
-      //       e.frameProverTries,
-      //       &tries.RollingFrecencyCritbitTrie{},
-      //     )
-      //     last = e.frameProverTries[rings]
- 
// } - // last.Add([]byte(addr.addr), nextFrame.FrameNumber) - // } - // for _, addr := range leaveReqs { - // for _, t := range e.frameProverTries { - // if bytes.Equal( - // t.FindNearest([]byte(addr.addr)).External.Key, - // []byte(addr.addr), - // ) { - // t.Remove([]byte(addr.addr)) - // break - // } - // } - // } - // e.frameProverTriesMx.Unlock() + e.frameProverTriesMx.Lock() + for _, addr := range joinReqs { + rings := len(e.frameProverTries) + last := e.frameProverTries[rings-1] + set := last.FindNearestAndApproximateNeighbors(make([]byte, 32)) + if len(set) == 1024 { + e.frameProverTries = append( + e.frameProverTries, + &tries.RollingFrecencyCritbitTrie{}, + ) + last = e.frameProverTries[rings] + } + last.Add([]byte(addr.addr), nextFrame.FrameNumber) + } + for _, addr := range leaveReqs { + for _, t := range e.frameProverTries { + if bytes.Equal( + t.FindNearest([]byte(addr.addr)).External.Key, + []byte(addr.addr), + ) { + t.Remove([]byte(addr.addr)) + break + } + } + } + e.frameProverTriesMx.Unlock() e.dataTimeReel.Insert(nextFrame, true) @@ -166,6 +167,10 @@ func (e *DataClockConsensusEngine) runLoop() { e.state = consensus.EngineStateCollecting } break + } else { + if !e.IsInProverTrie(e.provingKeyBytes) { + e.announceProverJoin() + } } case <-time.After(20 * time.Second): dataFrame, err := e.dataTimeReel.Head() @@ -196,7 +201,15 @@ func (e *DataClockConsensusEngine) runLoop() { e.latestFrameReceived = latestFrame.FrameNumber } - for _, trie := range e.GetFrameProverTries() { + trie := e.GetFrameProverTries()[0] + selBI, _ := dataFrame.GetSelector() + sel := make([]byte, 32) + sel = selBI.FillBytes(sel) + + if bytes.Equal( + trie.FindNearest(sel).External.Key, + e.provingKeyAddress, + ) { if bytes.Equal( trie.FindNearest(e.provingKeyAddress).External.Key, e.provingKeyAddress, @@ -208,72 +221,72 @@ func (e *DataClockConsensusEngine) runLoop() { continue } - // e.proverTrieRequestsMx.Lock() - // joinAddrs := tries.NewMinHeap[peerSeniorityItem]() - // leaveAddrs := tries.NewMinHeap[peerSeniorityItem]() - // for _, addr := range e.proverTrieJoinRequests { - // if _, ok := (*e.peerSeniority)[addr]; !ok { - // joinAddrs.Push(peerSeniorityItem{ - // addr: addr, - // seniority: 0, - // }) - // } else { - // joinAddrs.Push((*e.peerSeniority)[addr]) - // } - // } - // for _, addr := range e.proverTrieLeaveRequests { - // if _, ok := (*e.peerSeniority)[addr]; !ok { - // leaveAddrs.Push(peerSeniorityItem{ - // addr: addr, - // seniority: 0, - // }) - // } else { - // leaveAddrs.Push((*e.peerSeniority)[addr]) - // } - // } - // for _, addr := range e.proverTrieResumeRequests { - // if _, ok := e.proverTriePauseRequests[addr]; ok { - // delete(e.proverTriePauseRequests, addr) - // } - // } + e.proverTrieRequestsMx.Lock() + joinAddrs := tries.NewMinHeap[peerSeniorityItem]() + leaveAddrs := tries.NewMinHeap[peerSeniorityItem]() + for _, addr := range e.proverTrieJoinRequests { + if _, ok := (*e.peerSeniority)[addr]; !ok { + joinAddrs.Push(peerSeniorityItem{ + addr: addr, + seniority: 0, + }) + } else { + joinAddrs.Push((*e.peerSeniority)[addr]) + } + } + for _, addr := range e.proverTrieLeaveRequests { + if _, ok := (*e.peerSeniority)[addr]; !ok { + leaveAddrs.Push(peerSeniorityItem{ + addr: addr, + seniority: 0, + }) + } else { + leaveAddrs.Push((*e.peerSeniority)[addr]) + } + } + for _, addr := range e.proverTrieResumeRequests { + if _, ok := e.proverTriePauseRequests[addr]; ok { + delete(e.proverTriePauseRequests, addr) + } + } - // joinReqs := make([]peerSeniorityItem, 
len(joinAddrs.All()))
-      // copy(joinReqs, joinAddrs.All())
-      // slices.Reverse(joinReqs)
-      // leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
-      // copy(leaveReqs, leaveAddrs.All())
-      // slices.Reverse(leaveReqs)
+      joinReqs := make([]peerSeniorityItem, len(joinAddrs.All()))
+      copy(joinReqs, joinAddrs.All())
+      slices.Reverse(joinReqs)
+      leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All()))
+      copy(leaveReqs, leaveAddrs.All())
+      slices.Reverse(leaveReqs)
 
-      // e.proverTrieJoinRequests = make(map[string]string)
-      // e.proverTrieLeaveRequests = make(map[string]string)
-      // e.proverTrieRequestsMx.Unlock()
+      e.proverTrieJoinRequests = make(map[string]string)
+      e.proverTrieLeaveRequests = make(map[string]string)
+      e.proverTrieRequestsMx.Unlock()
 
-      // e.frameProverTriesMx.Lock()
-      // for _, addr := range joinReqs {
-      //   rings := len(e.frameProverTries)
-      //   last := e.frameProverTries[rings-1]
-      //   set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
-      //   if len(set) == 8 {
-      //     e.frameProverTries = append(
-      //       e.frameProverTries,
-      //       &tries.RollingFrecencyCritbitTrie{},
-      //     )
-      //     last = e.frameProverTries[rings]
-      //   }
-      //   last.Add([]byte(addr.addr), nextFrame.FrameNumber)
-      // }
-      // for _, addr := range leaveReqs {
-      //   for _, t := range e.frameProverTries {
-      //     if bytes.Equal(
-      //       t.FindNearest([]byte(addr.addr)).External.Key,
-      //       []byte(addr.addr),
-      //     ) {
-      //       t.Remove([]byte(addr.addr))
-      //       break
-      //     }
-      //   }
-      // }
-      // e.frameProverTriesMx.Unlock()
+      e.frameProverTriesMx.Lock()
+      for _, addr := range joinReqs {
+        rings := len(e.frameProverTries)
+        last := e.frameProverTries[rings-1]
+        set := last.FindNearestAndApproximateNeighbors(make([]byte, 32))
+        if len(set) == 1024 {
+          e.frameProverTries = append(
+            e.frameProverTries,
+            &tries.RollingFrecencyCritbitTrie{},
+          )
+          last = e.frameProverTries[rings]
+        }
+        last.Add([]byte(addr.addr), nextFrame.FrameNumber)
+      }
+      for _, addr := range leaveReqs {
+        for _, t := range e.frameProverTries {
+          if bytes.Equal(
+            t.FindNearest([]byte(addr.addr)).External.Key,
+            []byte(addr.addr),
+          ) {
+            t.Remove([]byte(addr.addr))
+            break
+          }
+        }
+      }
+      e.frameProverTriesMx.Unlock()
 
       e.dataTimeReel.Insert(nextFrame, true)
 
diff --git a/node/consensus/data/message_handler.go b/node/consensus/data/message_handler.go
index 6fee6c0..8f5102d 100644
--- a/node/consensus/data/message_handler.go
+++ b/node/consensus/data/message_handler.go
@@ -114,45 +114,46 @@ func (e *DataClockConsensusEngine) runMessageHandler() {
       ); err != nil {
         return
       }
-    // case protobufs.AnnounceProverJoinType:
-    //   if err := e.handleDataAnnounceProverJoin(
-    //     message.From,
-    //     msg.Address,
-    //     any,
-    //   ); err != nil {
-    //     return
-    //   }
-    // case protobufs.AnnounceProverLeaveType:
-    //   if !e.IsInProverTrie(peer.peerId) {
-    //     return
-    //   }
-    //   if err := e.handleDataAnnounceProverLeave(
-    //     message.From,
-    //     msg.Address,
-    //     any,
-    //   ); err != nil {
-    //     return
-    //   }
+    case protobufs.AnnounceProverJoinType:
+      if err := e.handleDataAnnounceProverJoin(
+        message.From,
+        msg.Address,
+        any,
+      ); err != nil {
+        return
+      }
+    case protobufs.AnnounceProverLeaveType:
+      if !e.IsInProverTrie(message.From) {
+        return
+      }
+      if err := e.handleDataAnnounceProverLeave(
+        message.From,
+        msg.Address,
+        any,
+      ); err != nil {
+        return
+      }
     case protobufs.AnnounceProverPauseType:
-      // stop spamming
-      e.pubSub.AddPeerScore(message.From, -1000)
-      // if err := e.handleDataAnnounceProverPause(
-      //   message.From,
-      //   msg.Address,
-      //   any,
-      // ); err != nil {
-      //   return
- 
// } + if !e.IsInProverTrie(message.From) { + return + } + // Limit score to penalize frequent restarts + e.pubSub.AddPeerScore(message.From, -100) + if err := e.handleDataAnnounceProverPause( + message.From, + msg.Address, + any, + ); err != nil { + return + } case protobufs.AnnounceProverResumeType: - // stop spamming - e.pubSub.AddPeerScore(message.From, -1000) - // if err := e.handleDataAnnounceProverResume( - // message.From, - // msg.Address, - // any, - // ); err != nil { - // return - // } + if err := e.handleDataAnnounceProverResume( + message.From, + msg.Address, + any, + ); err != nil { + return + } } }() } @@ -417,182 +418,182 @@ func (e *DataClockConsensusEngine) getAddressFromSignature( return addrBI.FillBytes(make([]byte, 32)), nil } -// func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin( -// peerID []byte, -// address []byte, -// any *anypb.Any, -// ) error { -// if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) { -// announce := &protobufs.AnnounceProverJoin{} -// if err := any.UnmarshalTo(announce); err != nil { -// return errors.Wrap(err, "handle data announce prover join") -// } +func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin( + peerID []byte, + address []byte, + any *anypb.Any, +) error { + if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) { + announce := &protobufs.AnnounceProverJoin{} + if err := any.UnmarshalTo(announce); err != nil { + return errors.Wrap(err, "handle data announce prover join") + } -// if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil { -// return errors.Wrap( -// errors.New("invalid data"), -// "handle data announce prover join", -// ) -// } + if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil { + return errors.Wrap( + errors.New("invalid data"), + "handle data announce prover join", + ) + } -// address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448) -// if err != nil { -// return errors.Wrap(err, "handle data announce prover join") -// } + address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448) + if err != nil { + return errors.Wrap(err, "handle data announce prover join") + } -// msg := []byte("join") -// msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber) -// msg = append(msg, announce.Filter...) -// if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil { -// return errors.Wrap(err, "handle data announce prover join") -// } + msg := []byte("join") + msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber) + msg = append(msg, announce.Filter...) 
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover join")
+    }
 
-//     e.proverTrieRequestsMx.Lock()
-//     if len(announce.Filter) != len(e.filter) {
-//       return errors.Wrap(
-//         errors.New("filter width mismatch"),
-//         "handle data announce prover join",
-//       )
-//     }
+    e.proverTrieRequestsMx.Lock()
+    defer e.proverTrieRequestsMx.Unlock()
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover join",
+      )
+    }
 
-//     e.proverTrieJoinRequests[string(address)] = string(announce.Filter)
-//     e.proverTrieRequestsMx.Unlock()
-//   }
-//   return nil
-// }
+    e.proverTrieJoinRequests[string(address)] = string(announce.Filter)
+  }
+  return nil
+}
 
-// func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave(
-//   peerID []byte,
-//   address []byte,
-//   any *anypb.Any,
-// ) error {
-//   if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
-//     announce := &protobufs.AnnounceProverLeave{}
-//     if err := any.UnmarshalTo(announce); err != nil {
-//       return errors.Wrap(err, "handle data announce prover leave")
-//     }
+func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave(
+  peerID []byte,
+  address []byte,
+  any *anypb.Any,
+) error {
+  if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
+    announce := &protobufs.AnnounceProverLeave{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover leave")
+    }
 
-//     if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-//       return errors.Wrap(
-//         errors.New("invalid data"),
-//         "handle data announce prover leave",
-//       )
-//     }
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover leave",
+      )
+    }
 
-//     e.proverTrieRequestsMx.Lock()
+    e.proverTrieRequestsMx.Lock()
+    defer e.proverTrieRequestsMx.Unlock()
 
-//     if len(announce.Filter) != len(e.filter) {
-//       return errors.Wrap(
-//         errors.New("filter width mismatch"),
-//         "handle data announce prover leave",
-//       )
-//     }
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover leave",
+      )
+    }
 
-//     msg := []byte("leave")
-//     msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-//     msg = append(msg, announce.Filter...)
+    msg := []byte("leave")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover leave")
+    }
 
-//     address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-//     if err != nil {
-//       return errors.Wrap(err, "handle data announce prover leave")
-//     }
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover leave")
+    }
 
-//     e.proverTrieLeaveRequests[string(address)] = string(announce.Filter)
-//     e.proverTrieRequestsMx.Unlock()
-//   }
-//   return nil
-// }
+    e.proverTrieLeaveRequests[string(address)] = string(announce.Filter)
+  }
+  return nil
+}
 
-// func (e *DataClockConsensusEngine) handleDataAnnounceProverPause(
-//   peerID []byte,
-//   address []byte,
-//   any *anypb.Any,
-// ) error {
-//   if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
-//     announce := &protobufs.AnnounceProverPause{}
-//     if err := any.UnmarshalTo(announce); err != nil {
-//       return errors.Wrap(err, "handle data announce prover pause")
-//     }
+func (e *DataClockConsensusEngine) handleDataAnnounceProverPause(
+  peerID []byte,
+  address []byte,
+  any *anypb.Any,
+) error {
+  if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
+    announce := &protobufs.AnnounceProverPause{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover pause")
+    }
 
-//     if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-//       return errors.Wrap(
-//         errors.New("invalid data"),
-//         "handle data announce prover leave",
-//       )
-//     }
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover pause",
+      )
+    }
 
-//     e.proverTrieRequestsMx.Lock()
-//     if len(announce.Filter) != len(e.filter) {
-//       return errors.Wrap(
-//         errors.New("filter width mismatch"),
-//         "handle data announce prover pause",
-//       )
-//     }
+    e.proverTrieRequestsMx.Lock()
+    defer e.proverTrieRequestsMx.Unlock()
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover pause",
+      )
+    }
 
-//     msg := []byte("pause")
-//     msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-//     msg = append(msg, announce.Filter...)
+    msg := []byte("pause")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover pause")
+    }
 
-//     address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-//     if err != nil {
-//       return errors.Wrap(err, "handle data announce prover pause")
-//     }
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover pause")
+    }
 
-//     e.proverTriePauseRequests[string(address)] = string(announce.Filter)
-//     e.proverTrieRequestsMx.Unlock()
-//   }
-//   return nil
-// }
+    e.proverTriePauseRequests[string(address)] = string(announce.Filter)
+  }
+  return nil
+}
 
-// func (e *DataClockConsensusEngine) handleDataAnnounceProverResume(
-//   peerID []byte,
-//   address []byte,
-//   any *anypb.Any,
-// ) error {
-//   if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
-//     announce := &protobufs.AnnounceProverResume{}
-//     if err := any.UnmarshalTo(announce); err != nil {
-//       return errors.Wrap(err, "handle data announce prover resume")
-//     }
+func (e *DataClockConsensusEngine) handleDataAnnounceProverResume(
+  peerID []byte,
+  address []byte,
+  any *anypb.Any,
+) error {
+  if e.GetFrameProverTries()[0].Contains(e.provingKeyAddress) {
+    announce := &protobufs.AnnounceProverResume{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover resume")
+    }
 
-//     if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-//       return errors.Wrap(
-//         errors.New("invalid data"),
-//         "handle data announce prover resume",
-//       )
-//     }
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover resume",
+      )
+    }
 
-//     e.proverTrieRequestsMx.Lock()
-//     if len(announce.Filter) != len(e.filter) {
-//       return errors.Wrap(
-//         errors.New("filter width mismatch"),
-//         "handle data announce prover resume",
-//       )
-//     }
+    e.proverTrieRequestsMx.Lock()
+    defer e.proverTrieRequestsMx.Unlock()
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover resume",
+      )
+    }
 
-//     address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-//     if err != nil {
-//       return errors.Wrap(err, "handle data announce prover resume")
-//     }
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover resume")
+    }
 
-//     msg := []byte("resume")
-//     msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-//     msg = append(msg, announce.Filter...)
+    msg := []byte("resume")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover resume")
+    }
 
-//     e.proverTrieResumeRequests[string(address)] = string(announce.Filter)
-//     e.proverTrieRequestsMx.Unlock()
-//   }
-//   return nil
-// }
+    e.proverTrieResumeRequests[string(address)] = string(announce.Filter)
+  }
+  return nil
+}
 
 func (e *DataClockConsensusEngine) handleTokenRequest(
   transition *protobufs.TokenRequest,
diff --git a/node/consensus/data/peer_messaging.go b/node/consensus/data/peer_messaging.go
index d6fc5e0..c038561 100644
--- a/node/consensus/data/peer_messaging.go
+++ b/node/consensus/data/peer_messaging.go
@@ -234,7 +234,7 @@ func (e *DataClockConsensusEngine) handleMint(
     t.Proofs[0],
     []byte("pre-dusk"),
   ) && (!bytes.Equal(t.Proofs[1], make([]byte, 32)) ||
-    head.FrameNumber < 67000) && e.GetFrameProverTries()[0].Contains(
+    time.Now().Unix() < 1730523600) && e.GetFrameProverTries()[0].Contains( // 1730523600 = 2024-11-02T05:00:00Z
     e.provingKeyAddress,
   ) {
     prevInput := []byte{}
diff --git a/node/execution/intrinsics/token/application/token_handle_mint.go b/node/execution/intrinsics/token/application/token_handle_mint.go
index bb09b58..6ffeed5 100644
--- a/node/execution/intrinsics/token/application/token_handle_mint.go
+++ b/node/execution/intrinsics/token/application/token_handle_mint.go
@@ -115,7 +115,7 @@ func (a *TokenApplication) handleMint(
       },
     }
     return outputs, nil
-  } else if len(t.Proofs) != 3 && currentFrameNumber > 77000 {
+  } else if len(t.Proofs) != 3 && currentFrameNumber > 0 {
     if _, touched := lockMap[string(t.Signature.PublicKey.KeyValue)]; touched {
       return nil, errors.Wrap(ErrInvalidStateTransition, "handle mint")
     }
diff --git a/node/p2p/blossomsub.go b/node/p2p/blossomsub.go
index 71d520b..3e0961a 100644
--- a/node/p2p/blossomsub.go
+++ b/node/p2p/blossomsub.go
@@ -372,7 +372,7 @@ func NewBlossomSub(
   }))
 
   params := mergeDefaults(p2pConfig)
-  rt := blossomsub.NewBlossomSubRouter(h, params)
+  rt := blossomsub.NewBlossomSubRouter(h, params, bs.network)
   pubsub, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
   if err != nil {
     panic(err)
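
Usage sketch (illustrative only, not part of this patch): NewBlossomSubRouter now takes a network byte, and any non-zero value rewrites the default protocol ID to "<BlossomSubID_v2>-network-<n>", so nodes configured for different networks never join the same mesh. A minimal caller might look like the following; the monorepo import path and DefaultBlossomSubParams are assumptions based on the package's defaults and are not confirmed by this diff.

package main

import (
	"context"

	"github.com/libp2p/go-libp2p"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	// Bare host for illustration; a real node wires in identity keys,
	// transports, and connection managers before building the router.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}

	// network == 0 keeps the stock BlossomSubID_v2 protocol ID; any other
	// value appends "-network-<n>", isolating this node's mesh from mainnet.
	rt := blossomsub.NewBlossomSubRouter(h, blossomsub.DefaultBlossomSubParams(), 1)

	ps, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt)
	if err != nil {
		panic(err)
	}
	_ = ps // subscribe to bitmasks and publish as usual from here
}

Note that the suffixing mutates the package-level BlossomSubDefaultProtocols slice, so it applies process-wide: a single process should construct routers for only one network value.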