Merge branch 'develop' into develop-2.1

Cassandra Heart 2024-12-18 01:21:07 -06:00
commit e32e71bb0c
No known key found for this signature in database
GPG Key ID: 6352152859385958
10 changed files with 254 additions and 215 deletions

View File

@ -22,6 +22,24 @@ import (
"gopkg.in/yaml.v2"
)
type GRPCMessageLimitsConfig struct {
MaxRecvMsgSize int `yaml:"maxRecvMsgSize"`
MaxSendMsgSize int `yaml:"maxSendMsgSize"`
}
// WithDefaults returns a copy of the GRPCMessageLimitsConfig with any missing fields set to
// their default values.
func (c GRPCMessageLimitsConfig) WithDefaults(recv, send int) GRPCMessageLimitsConfig {
cpy := c
if cpy.MaxRecvMsgSize == 0 {
cpy.MaxRecvMsgSize = recv
}
if cpy.MaxSendMsgSize == 0 {
cpy.MaxSendMsgSize = send
}
return cpy
}
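A quick sketch of how this value-receiver defaulting behaves, with illustrative sizes: a zero field is replaced by the caller-supplied default, while an explicitly set field is preserved.
limits := GRPCMessageLimitsConfig{MaxSendMsgSize: 32 * 1024 * 1024}
limits = limits.WithDefaults(1*1024*1024, 600*1024*1024)
// limits.MaxRecvMsgSize == 1 MiB  (was zero, default applied)
// limits.MaxSendMsgSize == 32 MiB (explicitly set, kept as-is)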
type Config struct {
Key *KeyConfig `yaml:"key"`
P2P *P2PConfig `yaml:"p2p"`
@ -32,6 +50,16 @@ type Config struct {
LogFile string `yaml:"logFile"`
}
// WithDefaults returns a copy of the config with default values filled in.
func (c Config) WithDefaults() Config {
cpy := c
p2p := cpy.P2P.WithDefaults()
cpy.P2P = &p2p
engine := cpy.Engine.WithDefaults()
cpy.Engine = &engine
return cpy
}
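Note that Config.WithDefaults dereferences cpy.P2P and cpy.Engine, so it assumes both pointers are non-nil; LoadConfig is expected to populate both, but a Config constructed by hand might not. A hypothetical guard (not part of this change) would look like:
if cpy.P2P == nil {
	cpy.P2P = &P2PConfig{}
}
if cpy.Engine == nil {
	cpy.Engine = &EngineConfig{}
}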
func NewConfig(configPath string) (*Config, error) {
file, err := os.Open(configPath)
if err != nil {
@ -447,7 +475,8 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
config.P2P.BootstrapPeers = peers
}
return config, nil
withDefaults := config.WithDefaults()
return &withDefaults, nil
}
func SaveConfig(configPath string, config *Config) error {

View File

@ -2,6 +2,17 @@ package config
import "time"
const (
defaultMinimumPeersRequired = 3
defaultDataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
defaultDataWorkerBaseListenPort = 40000
defaultDataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75 GiB
defaultSyncTimeout = 4 * time.Second
defaultSyncCandidates = 8
defaultSyncMessageReceiveLimit = 1 * 1024 * 1024
defaultSyncMessageSendLimit = 600 * 1024 * 1024
)
type FramePublishFragmentationReedSolomonConfig struct {
// The number of data shards to use for Reed-Solomon encoding and decoding.
DataShards int `yaml:"dataShards"`
@ -9,7 +20,8 @@ type FramePublishFragmentationReedSolomonConfig struct {
ParityShards int `yaml:"parityShards"`
}
// WithDefaults sets default values for any fields that are not set.
// WithDefaults returns a copy of the FramePublishFragmentationReedSolomonConfig with any missing fields set to
// their default values.
func (c FramePublishFragmentationReedSolomonConfig) WithDefaults() FramePublishFragmentationReedSolomonConfig {
cpy := c
if cpy.DataShards == 0 {
@ -29,7 +41,8 @@ type FramePublishFragmentationConfig struct {
ReedSolomon FramePublishFragmentationReedSolomonConfig `yaml:"reedSolomon"`
}
// WithDefaults sets default values for any fields that are not set.
// WithDefaults returns a copy of the FramePublishFragmentationConfig with any missing fields set to
// their default values.
func (c FramePublishFragmentationConfig) WithDefaults() FramePublishFragmentationConfig {
cpy := c
if cpy.Algorithm == "" {
@ -53,7 +66,8 @@ type FramePublishConfig struct {
BallastSize int `yaml:"ballastSize"`
}
// WithDefaults sets default values for any fields that are not set.
// WithDefaults returns a copy of the FramePublishConfig with any missing fields set to
// their default values.
func (c FramePublishConfig) WithDefaults() FramePublishConfig {
cpy := c
if cpy.Mode == "" {
@ -96,6 +110,8 @@ type EngineConfig struct {
SyncTimeout time.Duration `yaml:"syncTimeout"`
// Number of candidate peers per category to sync with.
SyncCandidates int `yaml:"syncCandidates"`
// The gRPC message size limits used for sync requests and responses.
SyncMessageLimits GRPCMessageLimitsConfig `yaml:"syncMessageLimits"`
// Values used only for testing. Do not override these in production; your
// node will get kicked out.
@ -106,3 +122,33 @@ type EngineConfig struct {
// EXPERIMENTAL: The configuration for frame publishing.
FramePublish FramePublishConfig `yaml:"framePublish"`
}
// WithDefaults returns a copy of the EngineConfig with any missing fields set to
// their default values.
func (c EngineConfig) WithDefaults() EngineConfig {
cpy := c
if cpy.MinimumPeersRequired == 0 {
cpy.MinimumPeersRequired = defaultMinimumPeersRequired
}
if cpy.DataWorkerBaseListenMultiaddr == "" {
cpy.DataWorkerBaseListenMultiaddr = defaultDataWorkerBaseListenMultiaddr
}
if cpy.DataWorkerBaseListenPort == 0 {
cpy.DataWorkerBaseListenPort = defaultDataWorkerBaseListenPort
}
if cpy.DataWorkerMemoryLimit == 0 {
cpy.DataWorkerMemoryLimit = defaultDataWorkerMemoryLimit
}
if cpy.SyncTimeout == 0 {
cpy.SyncTimeout = defaultSyncTimeout
}
if cpy.SyncCandidates == 0 {
cpy.SyncCandidates = defaultSyncCandidates
}
cpy.SyncMessageLimits = cpy.SyncMessageLimits.WithDefaults(
defaultSyncMessageReceiveLimit,
defaultSyncMessageSendLimit,
)
cpy.FramePublish = cpy.FramePublish.WithDefaults()
return cpy
}
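A sketch of the intended flow end to end, assuming a YAML document that sets only the receive limit (using the yaml tags declared above and gopkg.in/yaml.v2):
var engine EngineConfig
raw := []byte("syncMessageLimits:\n  maxRecvMsgSize: 2097152\n")
if err := yaml.Unmarshal(raw, &engine); err != nil {
	panic(err)
}
engine = engine.WithDefaults()
// engine.SyncMessageLimits.MaxRecvMsgSize == 2 MiB   (from YAML)
// engine.SyncMessageLimits.MaxSendMsgSize == 600 MiB (defaultSyncMessageSendLimit)
// engine.SyncTimeout == 4s, engine.SyncCandidates == 8, and so on.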

View File

@ -2,6 +2,22 @@ package config
import (
"time"
blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
qruntime "source.quilibrium.com/quilibrium/monorepo/node/internal/runtime"
)
const (
defaultLowWatermarkConnections = 160
defaultHighWatermarkConnections = 192
defaultGRPCServerRateLimit = 10
defaultMinBootstrapPeers = 3
defaultBootstrapParallelism = 10
defaultDiscoveryParallelism = 50
defaultDiscoveryPeerLookupLimit = 1000
defaultPingTimeout = 5 * time.Second
defaultPingPeriod = 30 * time.Second
defaultPingAttempts = 3
)
type P2PConfig struct {
@ -43,7 +59,7 @@ type P2PConfig struct {
LowWatermarkConnections int `yaml:"lowWatermarkConnections"`
HighWatermarkConnections int `yaml:"highWatermarkConnections"`
DirectPeers []string `yaml:"directPeers"`
GrpcServerRateLimit int `yaml:"grpcServerRateLimit"`
GRPCServerRateLimit int `yaml:"grpcServerRateLimit"`
MinBootstrapPeers int `yaml:"minBootstrapPeers"`
BootstrapParallelism int `yaml:"bootstrapParallelism"`
DiscoveryParallelism int `yaml:"discoveryParallelism"`
@ -56,3 +72,145 @@ type P2PConfig struct {
SubscriptionQueueSize int `yaml:"subscriptionQueueSize"`
PeerOutboundQueueSize int `yaml:"peerOutboundQueueSize"`
}
// WithDefaults returns a copy of the P2PConfig with any missing fields set to
// their default values.
func (c P2PConfig) WithDefaults() P2PConfig {
cpy := c
if cpy.D == 0 {
cpy.D = blossomsub.BlossomSubD
}
if cpy.DLo == 0 {
cpy.DLo = blossomsub.BlossomSubDlo
}
if cpy.DHi == 0 {
cpy.DHi = blossomsub.BlossomSubDhi
}
if cpy.DScore == 0 {
cpy.DScore = blossomsub.BlossomSubDscore
}
if cpy.DOut == 0 {
cpy.DOut = blossomsub.BlossomSubDout
}
if cpy.HistoryLength == 0 {
cpy.HistoryLength = blossomsub.BlossomSubHistoryLength
}
if cpy.HistoryGossip == 0 {
cpy.HistoryGossip = blossomsub.BlossomSubHistoryGossip
}
if cpy.DLazy == 0 {
cpy.DLazy = blossomsub.BlossomSubDlazy
}
if cpy.GossipFactor == 0 {
cpy.GossipFactor = blossomsub.BlossomSubGossipFactor
}
if cpy.GossipRetransmission == 0 {
cpy.GossipRetransmission = blossomsub.BlossomSubGossipRetransmission
}
if cpy.HeartbeatInitialDelay == 0 {
cpy.HeartbeatInitialDelay = blossomsub.BlossomSubHeartbeatInitialDelay
}
if cpy.HeartbeatInterval == 0 {
cpy.HeartbeatInterval = blossomsub.BlossomSubHeartbeatInterval
}
if cpy.FanoutTTL == 0 {
cpy.FanoutTTL = blossomsub.BlossomSubFanoutTTL
}
if cpy.PrunePeers == 0 {
cpy.PrunePeers = blossomsub.BlossomSubPrunePeers
}
if cpy.PruneBackoff == 0 {
cpy.PruneBackoff = blossomsub.BlossomSubPruneBackoff
}
if cpy.UnsubscribeBackoff == 0 {
cpy.UnsubscribeBackoff = blossomsub.BlossomSubUnsubscribeBackoff
}
if cpy.Connectors == 0 {
cpy.Connectors = blossomsub.BlossomSubConnectors
}
if cpy.MaxPendingConnections == 0 {
cpy.MaxPendingConnections = blossomsub.BlossomSubMaxPendingConnections
}
if cpy.ConnectionTimeout == 0 {
cpy.ConnectionTimeout = blossomsub.BlossomSubConnectionTimeout
}
if cpy.DirectConnectTicks == 0 {
cpy.DirectConnectTicks = blossomsub.BlossomSubDirectConnectTicks
}
if cpy.DirectConnectInitialDelay == 0 {
cpy.DirectConnectInitialDelay =
blossomsub.BlossomSubDirectConnectInitialDelay
}
if cpy.OpportunisticGraftTicks == 0 {
cpy.OpportunisticGraftTicks =
blossomsub.BlossomSubOpportunisticGraftTicks
}
if cpy.OpportunisticGraftPeers == 0 {
cpy.OpportunisticGraftPeers =
blossomsub.BlossomSubOpportunisticGraftPeers
}
if cpy.GraftFloodThreshold == 0 {
cpy.GraftFloodThreshold = blossomsub.BlossomSubGraftFloodThreshold
}
if cpy.MaxIHaveLength == 0 {
cpy.MaxIHaveLength = blossomsub.BlossomSubMaxIHaveLength
}
if cpy.MaxIHaveMessages == 0 {
cpy.MaxIHaveMessages = blossomsub.BlossomSubMaxIHaveMessages
}
if cpy.MaxIDontWantMessages == 0 {
cpy.MaxIDontWantMessages = blossomsub.BlossomSubMaxIDontWantMessages
}
if cpy.IWantFollowupTime == 0 {
cpy.IWantFollowupTime = blossomsub.BlossomSubIWantFollowupTime
}
if cpy.IDontWantMessageThreshold == 0 {
cpy.IDontWantMessageThreshold = blossomsub.BlossomSubIDontWantMessageThreshold
}
if cpy.IDontWantMessageTTL == 0 {
cpy.IDontWantMessageTTL = blossomsub.BlossomSubIDontWantMessageTTL
}
if cpy.LowWatermarkConnections == 0 {
cpy.LowWatermarkConnections = defaultLowWatermarkConnections
}
if cpy.HighWatermarkConnections == 0 {
cpy.HighWatermarkConnections = defaultHighWatermarkConnections
}
if cpy.GRPCServerRateLimit == 0 {
cpy.GRPCServerRateLimit = defaultGRPCServerRateLimit
}
if cpy.MinBootstrapPeers == 0 {
cpy.MinBootstrapPeers = defaultMinBootstrapPeers
}
if cpy.BootstrapParallelism == 0 {
cpy.BootstrapParallelism = defaultBootstrapParallelism
}
if cpy.DiscoveryParallelism == 0 {
cpy.DiscoveryParallelism = defaultDiscoveryParallelism
}
if cpy.DiscoveryPeerLookupLimit == 0 {
cpy.DiscoveryPeerLookupLimit = defaultDiscoveryPeerLookupLimit
}
if cpy.PingTimeout == 0 {
cpy.PingTimeout = defaultPingTimeout
}
if cpy.PingPeriod == 0 {
cpy.PingPeriod = defaultPingPeriod
}
if cpy.PingAttempts == 0 {
cpy.PingAttempts = defaultPingAttempts
}
if cpy.ValidateQueueSize == 0 {
cpy.ValidateQueueSize = blossomsub.DefaultValidateQueueSize
}
if cpy.ValidateWorkers == 0 {
cpy.ValidateWorkers = qruntime.WorkerCount(0, false)
}
if cpy.SubscriptionQueueSize == 0 {
cpy.SubscriptionQueueSize = blossomsub.DefaultSubscriptionQueueSize
}
if cpy.PeerOutboundQueueSize == 0 {
cpy.PeerOutboundQueueSize = blossomsub.DefaultPeerOutboundQueueSize
}
return cpy
}
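One consequence of the zero-means-unset convention: a field whose zero value is meaningful needs a sentinel. GRPCServerRateLimit uses -1 for "disabled" (see the GetDataFrame hunk below), which survives defaulting because only an exact 0 is replaced:
p2p := P2PConfig{GRPCServerRateLimit: -1} // -1 disables rate limiting entirely
p2p = p2p.WithDefaults()
// p2p.GRPCServerRateLimit == -1 (kept; only a zero value becomes the default of 10)
// all unset blossomsub parameters (D, DLo, DHi, ...) take the library defaults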

View File

@ -98,7 +98,7 @@ func (e *DataClockConsensusEngine) publishProof(
}
e.peerMapMx.Unlock()
cfg := e.config.Engine.FramePublish.WithDefaults()
cfg := e.config.Engine.FramePublish
if cfg.BallastSize > 0 {
frame = proto.Clone(frame).(*protobufs.ClockFrame)
frame.Padding = make([]byte, cfg.BallastSize)

View File

@ -20,11 +20,6 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
const (
defaultSyncTimeout = 4 * time.Second
defaultSyncCandidates = 8
)
func (e *DataClockConsensusEngine) syncWithMesh() error {
e.logger.Info("collecting vdf proofs")
@ -308,10 +303,6 @@ func (e *DataClockConsensusEngine) GetAheadPeers(frameNumber uint64) []internal.
}
syncCandidates := e.config.Engine.SyncCandidates
if syncCandidates == 0 {
syncCandidates = defaultSyncCandidates
}
return slices.Concat(
internal.WeightedSampleWithoutReplacement(nearCandidates, min(len(nearCandidates), syncCandidates)),
internal.WeightedSampleWithoutReplacement(reachableCandidates, min(len(reachableCandidates), syncCandidates)),
@ -350,10 +341,6 @@ func (e *DataClockConsensusEngine) syncWithPeer(
}()
syncTimeout := e.config.Engine.SyncTimeout
if syncTimeout == 0 {
syncTimeout = defaultSyncTimeout
}
dialCtx, cancelDial := context.WithTimeout(e.ctx, syncTimeout)
defer cancelDial()
cc, err := e.pubSub.GetDirectChannel(dialCtx, peerId, "sync")
@ -379,7 +366,10 @@ func (e *DataClockConsensusEngine) syncWithPeer(
&protobufs.GetDataFrameRequest{
FrameNumber: latest.FrameNumber + 1,
},
grpc.MaxCallRecvMsgSize(600*1024*1024),
// The message size limits are swapped because the server is the one
// sending the data.
grpc.MaxCallRecvMsgSize(e.config.Engine.SyncMessageLimits.MaxSendMsgSize),
grpc.MaxCallSendMsgSize(e.config.Engine.SyncMessageLimits.MaxRecvMsgSize),
)
cancelGet()
if err != nil {
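The swap keeps the two endpoints mirror images of each other: the sync server (in the next file) receives requests under MaxRecvMsgSize and sends frames under MaxSendMsgSize, so the client must invert them. With the defaults, the client can receive frames up to 600 MiB while sending requests capped at 1 MiB. A sketch of the pairing, with the server options shown for contrast:
limits := e.config.Engine.SyncMessageLimits
serverOpts := []grpc.ServerOption{
	grpc.MaxRecvMsgSize(limits.MaxRecvMsgSize), // requests: client -> server
	grpc.MaxSendMsgSize(limits.MaxSendMsgSize), // frames: server -> client
}
callOpts := []grpc.CallOption{
	grpc.MaxCallRecvMsgSize(limits.MaxSendMsgSize), // client receives what the server sends
	grpc.MaxCallSendMsgSize(limits.MaxRecvMsgSize), // client sends what the server receives
}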

View File

@ -220,21 +220,11 @@ func NewDataClockConsensusEngine(
panic(errors.New("peer info manager is nil"))
}
minimumPeersRequired := cfg.Engine.MinimumPeersRequired
if minimumPeersRequired == 0 {
minimumPeersRequired = 3
}
difficulty := cfg.Engine.Difficulty
if difficulty == 0 {
difficulty = 160000
}
rateLimit := cfg.P2P.GrpcServerRateLimit
if rateLimit == 0 {
rateLimit = 10
}
clockFrameFragmentBuffer, err := fragmentation.NewClockFrameFragmentCircularBuffer(
fragmentation.NewReedSolomonClockFrameFragmentBuffer,
16,
@ -272,7 +262,7 @@ func NewDataClockConsensusEngine(
syncingStatus: SyncStatusNotSyncing,
peerMap: map[string]*peerInfo{},
uncooperativePeersMap: map[string]*peerInfo{},
minimumPeersRequired: minimumPeersRequired,
minimumPeersRequired: cfg.Engine.MinimumPeersRequired,
report: report,
frameProver: frameProver,
masterTimeReel: masterTimeReel,
@ -285,7 +275,7 @@ func NewDataClockConsensusEngine(
config: cfg,
preMidnightMint: map[string]struct{}{},
grpcRateLimiter: NewRateLimiter(
rateLimit,
cfg.P2P.GRPCServerRateLimit,
time.Minute,
),
requestSyncCh: make(chan struct{}, 1),
@ -353,8 +343,8 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
e.pubSub.Subscribe(e.infoFilter, e.handleInfoMessage)
syncServer := qgrpc.NewServer(
grpc.MaxSendMsgSize(40*1024*1024),
grpc.MaxRecvMsgSize(40*1024*1024),
grpc.MaxRecvMsgSize(e.config.Engine.SyncMessageLimits.MaxRecvMsgSize),
grpc.MaxSendMsgSize(e.config.Engine.SyncMessageLimits.MaxSendMsgSize),
)
e.grpcServers = append(e.grpcServers[:0:0], syncServer)
protobufs.RegisterDataServiceServer(syncServer, e)
@ -889,14 +879,6 @@ func (
zap.Uint32("client", index),
)
if e.config.Engine.DataWorkerBaseListenMultiaddr == "" {
e.config.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if e.config.Engine.DataWorkerBaseListenPort == 0 {
e.config.Engine.DataWorkerBaseListenPort = 40000
}
ma, err := multiaddr.NewMultiaddr(
fmt.Sprintf(
e.config.Engine.DataWorkerBaseListenMultiaddr,
@ -1001,14 +983,6 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
zap.Int("parallelism", parallelism),
)
if e.config.Engine.DataWorkerBaseListenMultiaddr == "" {
e.config.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if e.config.Engine.DataWorkerBaseListenPort == 0 {
e.config.Engine.DataWorkerBaseListenPort = 40000
}
clients := make([]protobufs.DataIPCServiceClient, parallelism)
for i := 0; i < parallelism; i++ {

View File

@ -110,7 +110,7 @@ func (e *DataClockConsensusEngine) runTxMessageHandler() {
continue
}
if e.frameProverTries[0].Contains(e.provingKeyAddress) {
if e.FrameProverTrieContains(0, e.provingKeyAddress) {
wg := &sync.WaitGroup{}
for name := range e.executionEngines {
name := name

View File

@ -35,7 +35,7 @@ func (e *DataClockConsensusEngine) GetDataFrame(
if !ok {
return nil, status.Error(codes.Internal, "remote peer ID not found")
}
if e.config.P2P.GrpcServerRateLimit != -1 {
if e.config.P2P.GRPCServerRateLimit != -1 {
if err := e.grpcRateLimiter.Allow(peerID); err != nil {
return nil, err
}
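Taken together with the defaulting above, the yaml knob grpcServerRateLimit has three regimes: 0 (unset) becomes the default of 10 via P2PConfig.WithDefaults, a positive value is used as-is, and -1 bypasses the limiter entirely. As a hypothetical helper spelling out the semantics (the limiter window is time.Minute, per the engine constructor):
// effectiveRateLimit is illustrative only, not part of the change.
func effectiveRateLimit(configured int) (perMinute int, enabled bool) {
	switch {
	case configured == -1:
		return 0, false // GetDataFrame skips the limiter
	case configured == 0:
		return defaultGRPCServerRateLimit, true // 10
	default:
		return configured, true
	}
}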

View File

@ -400,15 +400,6 @@ func main() {
return
}
if nodeConfig.Engine.DataWorkerBaseListenMultiaddr == "" {
nodeConfig.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if nodeConfig.Engine.DataWorkerBaseListenPort == 0 {
nodeConfig.Engine.DataWorkerBaseListenPort = 40000
}
if nodeConfig.Engine.DataWorkerMemoryLimit == 0 {
nodeConfig.Engine.DataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75GiB
}
if len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
maxProcs, numCPU := runtime.GOMAXPROCS(0), runtime.NumCPU()
if maxProcs > numCPU && !nodeConfig.Engine.AllowExcessiveGOMAXPROCS {
@ -476,6 +467,9 @@ func main() {
if _, explicitGOMEMLIMIT := os.LookupEnv("GOMEMLIMIT"); !explicitGOMEMLIMIT {
rdebug.SetMemoryLimit(availableOverhead * 8 / 10)
}
if _, explicitGOGC := os.LookupEnv("GOGC"); !explicitGOGC {
rdebug.SetGCPercent(10)
}
}
}
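Both runtime knobs are guarded by os.LookupEnv, so explicit environment settings still win: the node only supplies its opinionated defaults (a soft memory limit at 80% of the computed overhead, and a GC cycle triggered at roughly 10% heap growth) when the operator has not set GOMEMLIMIT or GOGC. The guard pattern in isolation, with the semantics spelled out:
// Sketch of the same guards; availableOverhead is computed earlier in main.
if _, set := os.LookupEnv("GOMEMLIMIT"); !set {
	rdebug.SetMemoryLimit(availableOverhead * 8 / 10) // soft cap at 80%
}
if _, set := os.LookupEnv("GOGC"); !set {
	rdebug.SetGCPercent(10) // GC when the heap grows ~10% past live data
}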

View File

@ -45,26 +45,13 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/config"
qgrpc "source.quilibrium.com/quilibrium/monorepo/node/internal/grpc"
"source.quilibrium.com/quilibrium/monorepo/node/internal/observability"
qruntime "source.quilibrium.com/quilibrium/monorepo/node/internal/runtime"
"source.quilibrium.com/quilibrium/monorepo/node/p2p/internal"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// The default watermarks are the defaults used by libp2p.DefaultConnectionManager.
// We explicitly set them here in order to force internal consistency between the
// connection manager and the resource manager.
const (
defaultLowWatermarkConnections = 160
defaultHighWatermarkConnections = 192
defaultMinBootstrapPeers = 3
defaultBootstrapParallelism = 10
defaultDiscoveryParallelism = 50
defaultDiscoveryPeerLookupLimit = 1000
defaultPingTimeout = 5 * time.Second
defaultPingPeriod = 30 * time.Second
defaultPingAttempts = 3
DecayInterval = 10 * time.Second
AppDecay = .9
DecayInterval = 10 * time.Second
AppDecay = .9
)
type appScore struct {
@ -192,7 +179,6 @@ func NewBlossomSub(
logger *zap.Logger,
) *BlossomSub {
ctx := context.Background()
p2pConfig = withDefaults(p2pConfig)
opts := []libp2pconfig.Option{
libp2p.ListenAddrStrings(p2pConfig.ListenMultiaddr),
@ -1043,144 +1029,6 @@ func (b *BlossomSub) SignMessage(msg []byte) ([]byte, error) {
return sig, errors.Wrap(err, "sign message")
}
func withDefaults(p2pConfig *config.P2PConfig) *config.P2PConfig {
cfg := *p2pConfig
p2pConfig = &cfg
if p2pConfig.D == 0 {
p2pConfig.D = blossomsub.BlossomSubD
}
if p2pConfig.DLo == 0 {
p2pConfig.DLo = blossomsub.BlossomSubDlo
}
if p2pConfig.DHi == 0 {
p2pConfig.DHi = blossomsub.BlossomSubDhi
}
if p2pConfig.DScore == 0 {
p2pConfig.DScore = blossomsub.BlossomSubDscore
}
if p2pConfig.DOut == 0 {
p2pConfig.DOut = blossomsub.BlossomSubDout
}
if p2pConfig.HistoryLength == 0 {
p2pConfig.HistoryLength = blossomsub.BlossomSubHistoryLength
}
if p2pConfig.HistoryGossip == 0 {
p2pConfig.HistoryGossip = blossomsub.BlossomSubHistoryGossip
}
if p2pConfig.DLazy == 0 {
p2pConfig.DLazy = blossomsub.BlossomSubDlazy
}
if p2pConfig.GossipFactor == 0 {
p2pConfig.GossipFactor = blossomsub.BlossomSubGossipFactor
}
if p2pConfig.GossipRetransmission == 0 {
p2pConfig.GossipRetransmission = blossomsub.BlossomSubGossipRetransmission
}
if p2pConfig.HeartbeatInitialDelay == 0 {
p2pConfig.HeartbeatInitialDelay = blossomsub.BlossomSubHeartbeatInitialDelay
}
if p2pConfig.HeartbeatInterval == 0 {
p2pConfig.HeartbeatInterval = blossomsub.BlossomSubHeartbeatInterval
}
if p2pConfig.FanoutTTL == 0 {
p2pConfig.FanoutTTL = blossomsub.BlossomSubFanoutTTL
}
if p2pConfig.PrunePeers == 0 {
p2pConfig.PrunePeers = blossomsub.BlossomSubPrunePeers
}
if p2pConfig.PruneBackoff == 0 {
p2pConfig.PruneBackoff = blossomsub.BlossomSubPruneBackoff
}
if p2pConfig.UnsubscribeBackoff == 0 {
p2pConfig.UnsubscribeBackoff = blossomsub.BlossomSubUnsubscribeBackoff
}
if p2pConfig.Connectors == 0 {
p2pConfig.Connectors = blossomsub.BlossomSubConnectors
}
if p2pConfig.MaxPendingConnections == 0 {
p2pConfig.MaxPendingConnections = blossomsub.BlossomSubMaxPendingConnections
}
if p2pConfig.ConnectionTimeout == 0 {
p2pConfig.ConnectionTimeout = blossomsub.BlossomSubConnectionTimeout
}
if p2pConfig.DirectConnectTicks == 0 {
p2pConfig.DirectConnectTicks = blossomsub.BlossomSubDirectConnectTicks
}
if p2pConfig.DirectConnectInitialDelay == 0 {
p2pConfig.DirectConnectInitialDelay =
blossomsub.BlossomSubDirectConnectInitialDelay
}
if p2pConfig.OpportunisticGraftTicks == 0 {
p2pConfig.OpportunisticGraftTicks =
blossomsub.BlossomSubOpportunisticGraftTicks
}
if p2pConfig.OpportunisticGraftPeers == 0 {
p2pConfig.OpportunisticGraftPeers =
blossomsub.BlossomSubOpportunisticGraftPeers
}
if p2pConfig.GraftFloodThreshold == 0 {
p2pConfig.GraftFloodThreshold = blossomsub.BlossomSubGraftFloodThreshold
}
if p2pConfig.MaxIHaveLength == 0 {
p2pConfig.MaxIHaveLength = blossomsub.BlossomSubMaxIHaveLength
}
if p2pConfig.MaxIHaveMessages == 0 {
p2pConfig.MaxIHaveMessages = blossomsub.BlossomSubMaxIHaveMessages
}
if p2pConfig.MaxIDontWantMessages == 0 {
p2pConfig.MaxIDontWantMessages = blossomsub.BlossomSubMaxIDontWantMessages
}
if p2pConfig.IWantFollowupTime == 0 {
p2pConfig.IWantFollowupTime = blossomsub.BlossomSubIWantFollowupTime
}
if p2pConfig.IDontWantMessageThreshold == 0 {
p2pConfig.IDontWantMessageThreshold = blossomsub.BlossomSubIDontWantMessageThreshold
}
if p2pConfig.IDontWantMessageTTL == 0 {
p2pConfig.IDontWantMessageTTL = blossomsub.BlossomSubIDontWantMessageTTL
}
if p2pConfig.LowWatermarkConnections == 0 {
p2pConfig.LowWatermarkConnections = defaultLowWatermarkConnections
}
if p2pConfig.HighWatermarkConnections == 0 {
p2pConfig.HighWatermarkConnections = defaultHighWatermarkConnections
}
if p2pConfig.MinBootstrapPeers == 0 {
p2pConfig.MinBootstrapPeers = defaultMinBootstrapPeers
}
if p2pConfig.BootstrapParallelism == 0 {
p2pConfig.BootstrapParallelism = defaultBootstrapParallelism
}
if p2pConfig.DiscoveryParallelism == 0 {
p2pConfig.DiscoveryParallelism = defaultDiscoveryParallelism
}
if p2pConfig.DiscoveryPeerLookupLimit == 0 {
p2pConfig.DiscoveryPeerLookupLimit = defaultDiscoveryPeerLookupLimit
}
if p2pConfig.PingTimeout == 0 {
p2pConfig.PingTimeout = defaultPingTimeout
}
if p2pConfig.PingPeriod == 0 {
p2pConfig.PingPeriod = defaultPingPeriod
}
if p2pConfig.PingAttempts == 0 {
p2pConfig.PingAttempts = defaultPingAttempts
}
if p2pConfig.ValidateQueueSize == 0 {
p2pConfig.ValidateQueueSize = blossomsub.DefaultValidateQueueSize
}
if p2pConfig.ValidateWorkers == 0 {
p2pConfig.ValidateWorkers = qruntime.WorkerCount(0, false)
}
if p2pConfig.SubscriptionQueueSize == 0 {
p2pConfig.SubscriptionQueueSize = blossomsub.DefaultSubscriptionQueueSize
}
if p2pConfig.PeerOutboundQueueSize == 0 {
p2pConfig.PeerOutboundQueueSize = blossomsub.DefaultPeerOutboundQueueSize
}
return p2pConfig
}
func toBlossomSubParams(p2pConfig *config.P2PConfig) blossomsub.BlossomSubParams {
return blossomsub.BlossomSubParams{
D: p2pConfig.D,