Mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git, synced 2026-02-21 10:27:26 +08:00
* wip: conversion of hotstuff from flow into Q-oriented model
* bulk of tests
* remaining non-integration tests
* add integration test, adjust log interface, small tweaks
* further adjustments, restore full pacemaker shape
* add component lifecycle management+supervisor
* further refinements
* resolve timeout hanging
* mostly finalized state for consensus
* bulk of engine swap out
* lifecycle-ify most types
* wiring nearly complete, missing needed hooks for proposals
* plugged in, vetting message validation paths
* global consensus, plugged in and verified
* app shard now wired in too
* do not decode empty keys.yml (#456)
* remove obsolete engine.maxFrames config parameter (#454)
* default to Info log level unless debug is enabled (#453)
* respect config's "logging" section params, remove obsolete single-file logging (#452)
* Trivial code cleanup aiming to reduce Go compiler warnings (#451)
* simplify range traversal
* simplify channel read for single select case
* delete rand.Seed() deprecated in Go 1.20 and no-op as of Go 1.24
* simplify range traversal
* simplify channel read for single select case
* remove redundant type from array
* simplify range traversal
* simplify channel read for single select case
* RC slate
* finalize 2.1.0.5
* Update comments in StrictMonotonicCounter: fix comment formatting and clarify description

---------

Co-authored-by: Black Swan <3999712+blacks1ne@users.noreply.github.com>
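The "simplify range traversal" and "simplify channel read for single select case" bullets under the (#451) cleanup refer to generic Go simplifications rather than anything specific to the file shown below. A minimal, illustrative sketch of what those two patterns typically look like (not the commit's actual diff):

package main

import "fmt"

func main() {
    items := []string{"a", "b", "c"}

    // Range traversal: instead of `for i := 0; i < len(items); i++`,
    // range over the index directly.
    for i := range items {
        fmt.Println(i, items[i])
    }

    ch := make(chan int, 1)
    ch <- 42

    // Single-case select: a select statement with only one receive case,
    //   select {
    //   case v := <-ch:
    //       fmt.Println(v)
    //   }
    // is equivalent to a plain channel receive.
    v := <-ch
    fmt.Println(v)
}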
145 lines · 3.5 KiB · Go
package p2p

import (
    "bytes"
    "sync"
    "time"

    "go.uber.org/zap"
    "source.quilibrium.com/quilibrium/monorepo/lifecycle"
    "source.quilibrium.com/quilibrium/monorepo/protobufs"
    "source.quilibrium.com/quilibrium/monorepo/types/p2p"
)

// InMemoryPeerInfoManager tracks peer information in memory: peerMap is keyed
// by peer id for lookups, fastestPeers is kept ordered by bandwidth for
// speed-based selection, and peerInfoMx guards both. Updates arrive
// asynchronously over peerInfoCh and are applied by the Start loop.
type InMemoryPeerInfoManager struct {
    logger     *zap.Logger
    peerInfoCh chan *protobufs.PeerInfo
    peerInfoMx sync.RWMutex

    peerMap      map[string]*p2p.PeerInfo
    fastestPeers []*p2p.PeerInfo
    ctx          lifecycle.SignalerContext
}

var _ p2p.PeerInfoManager = (*InMemoryPeerInfoManager)(nil)

// NewInMemoryPeerInfoManager creates a manager with a buffered intake channel.
// Start must be called (normally by the lifecycle supervisor) before
// AddPeerInfo is used, since AddPeerInfo relies on the context set by Start.
func NewInMemoryPeerInfoManager(logger *zap.Logger) *InMemoryPeerInfoManager {
    return &InMemoryPeerInfoManager{
        logger:       logger,
        peerInfoCh:   make(chan *protobufs.PeerInfo, 1000),
        fastestPeers: []*p2p.PeerInfo{},
        peerMap:      make(map[string]*p2p.PeerInfo),
    }
}

// Start signals readiness and then consumes peerInfoCh until the lifecycle
// context is cancelled, converting each reported protobufs.PeerInfo into the
// internal representation and recording it in both peerMap and the
// bandwidth-ordered fastestPeers slice.
func (m *InMemoryPeerInfoManager) Start(
    ctx lifecycle.SignalerContext,
    ready lifecycle.ReadyFunc,
) {
    m.ctx = ctx
    ready()

    for {
        select {
        case info := <-m.peerInfoCh:
            m.peerInfoMx.Lock()
            reachability := []p2p.Reachability{}
            for _, r := range info.Reachability {
                reachability = append(reachability, p2p.Reachability{
                    Filter:           r.Filter,
                    PubsubMultiaddrs: r.PubsubMultiaddrs,
                    StreamMultiaddrs: r.StreamMultiaddrs,
                })
            }
            capabilities := []p2p.Capability{}
            for _, c := range info.Capabilities {
                capabilities = append(capabilities, p2p.Capability{
                    ProtocolIdentifier: c.ProtocolIdentifier,
                    AdditionalMetadata: c.AdditionalMetadata,
                })
            }
            seen := time.Now().UnixMilli()
            // The map entry and the sorted slice receive the same snapshot;
            // bandwidth is currently recorded as a fixed 100.
            m.peerMap[string(info.PeerId)] = &p2p.PeerInfo{
                PeerId:       info.PeerId,
                Bandwidth:    100,
                Capabilities: capabilities,
                Reachability: reachability,
                Cores:        uint32(len(reachability)),
                LastSeen:     seen,
            }
            m.searchAndInsertPeer(&p2p.PeerInfo{
                PeerId:       info.PeerId,
                Bandwidth:    100,
                Capabilities: capabilities,
                Reachability: reachability,
                Cores:        uint32(len(reachability)),
                LastSeen:     seen,
            })
            m.peerInfoMx.Unlock()
        case <-ctx.Done():
            return
        }
    }
}

// AddPeerInfo hands a peer report to the Start loop via peerInfoCh, giving up
// without sending if the lifecycle context has been cancelled.
func (m *InMemoryPeerInfoManager) AddPeerInfo(info *protobufs.PeerInfo) {
    select {
    case <-m.ctx.Done():
    case m.peerInfoCh <- info:
    }
}

// GetPeerInfo returns the stored info for peerId, or nil if the peer is
// unknown.
func (m *InMemoryPeerInfoManager) GetPeerInfo(peerId []byte) *p2p.PeerInfo {
    m.peerInfoMx.RLock()
    manifest, ok := m.peerMap[string(peerId)]
    m.peerInfoMx.RUnlock()
    if !ok {
        return nil
    }
    return manifest
}

// GetPeerMap returns a shallow copy of the peer map so callers can iterate
// without holding the manager's lock.
func (m *InMemoryPeerInfoManager) GetPeerMap() map[string]*p2p.PeerInfo {
    data := make(map[string]*p2p.PeerInfo)
    m.peerInfoMx.RLock()
    for k, v := range m.peerMap {
        data[k] = v
    }
    m.peerInfoMx.RUnlock()

    return data
}

// GetPeersBySpeed returns peer ids ordered from highest to lowest bandwidth.
func (m *InMemoryPeerInfoManager) GetPeersBySpeed() [][]byte {
    result := [][]byte{}
    m.peerInfoMx.RLock()
    for _, info := range m.fastestPeers {
        result = append(result, info.PeerId)
    }
    m.peerInfoMx.RUnlock()
    return result
}

// blatantly lifted from slices.BinarySearchFunc, optimized for direct insertion
// and uint64 comparison without overflow
func (m *InMemoryPeerInfoManager) searchAndInsertPeer(info *p2p.PeerInfo) {
    // Binary search for the insertion point in fastestPeers, which is kept
    // sorted by descending Bandwidth.
    n := len(m.fastestPeers)
    i, j := 0, n
    for i < j {
        h := int(uint(i+j) >> 1)
        if m.fastestPeers[h].Bandwidth > info.Bandwidth {
            i = h + 1
        } else {
            j = h
        }
    }

    if i < n && m.fastestPeers[i].Bandwidth == info.Bandwidth &&
        bytes.Equal(m.fastestPeers[i].PeerId, info.PeerId) {
        // Same peer at the same bandwidth: replace the entry in place.
        m.fastestPeers[i] = info
    } else {
        // Grow the slice by one and shift the tail right to make room at i.
        m.fastestPeers = append(m.fastestPeers, new(p2p.PeerInfo))
        copy(m.fastestPeers[i+1:], m.fastestPeers[i:])
        m.fastestPeers[i] = info
    }
}
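
For comparison only: the comment above names slices.BinarySearchFunc as the starting point, and since Go 1.21 the same ordered insert can be expressed with the standard library directly. The sketch below uses a simplified stand-in struct rather than the repo's p2p.PeerInfo, and is an illustration of the equivalent stdlib form, not the project's code.

package main

import (
    "bytes"
    "cmp"
    "fmt"
    "slices"
)

// peer is a stand-in for the two fields searchAndInsertPeer actually compares.
type peer struct {
    PeerId    []byte
    Bandwidth uint64
}

// insertBySpeed keeps peers sorted by descending Bandwidth, replacing an entry
// only when both bandwidth and peer id match, mirroring the hand-rolled logic.
func insertBySpeed(peers []*peer, p *peer) []*peer {
    i, found := slices.BinarySearchFunc(peers, p, func(e, t *peer) int {
        // Reverse the comparison so "increasing" order means descending
        // bandwidth; cmp.Compare also sidesteps uint64 subtraction overflow.
        return cmp.Compare(t.Bandwidth, e.Bandwidth)
    })
    if found && bytes.Equal(peers[i].PeerId, p.PeerId) {
        peers[i] = p
        return peers
    }
    return slices.Insert(peers, i, p)
}

func main() {
    peers := []*peer{}
    peers = insertBySpeed(peers, &peer{PeerId: []byte("a"), Bandwidth: 50})
    peers = insertBySpeed(peers, &peer{PeerId: []byte("b"), Bandwidth: 100})
    peers = insertBySpeed(peers, &peer{PeerId: []byte("c"), Bandwidth: 75})
    for _, p := range peers {
        fmt.Printf("%s %d\n", p.PeerId, p.Bandwidth)
    }
    // Prints: b 100, c 75, a 50
}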