feat(provider): resume cycle (#11031)
Some checks are pending
CodeQL / codeql (push) Waiting to run
Docker Check / lint (push) Waiting to run
Docker Check / build (push) Waiting to run
Gateway Conformance / gateway-conformance (push) Waiting to run
Gateway Conformance / gateway-conformance-libp2p-experiment (push) Waiting to run
Go Build / go-build (push) Waiting to run
Go Check / go-check (push) Waiting to run
Go Lint / go-lint (push) Waiting to run
Go Test / go-test (push) Waiting to run
Interop / interop-prep (push) Waiting to run
Interop / helia-interop (push) Blocked by required conditions
Interop / ipfs-webui (push) Blocked by required conditions
Sharness / sharness-test (push) Waiting to run
Spell Check / spellcheck (push) Waiting to run

* bump kad-dht: resume reprovide cycle

* daemon: --provide-fresh-start flag

* changelog

* docs

* go-fmt

* chore: latest go-libp2p-kad-dht#1170

after conflict resolution, to confirm CI is still green

* kad-dht: depend on latest master

* move daemon flag to Provider.DHT.ResumeEnabled config

* refactor: sweep provider datastore

* bump kad-dht

* bump kad-dht

* bump kad-dht

* make datastore keys constant

* use kad-dht master

* add emoji to changelog entry

* go-fmt

* bump kad-dht

* test(provider): add tests for resume cycle feature

validates Provide.DHT.ResumeEnabled behavior:
- preserves cycle state when enabled (default)
- resets cycle when disabled

tests verify current_time_offset across restarts using JSON output

---------

Co-authored-by: Marcin Rataj <lidel@lidel.org>
This commit is contained in:
Guillaume Michel 2025-10-29 11:07:46 +01:00 committed by GitHub
parent 2e9c4ec500
commit c2bf0f9515
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 233 additions and 20 deletions

View File

@ -16,6 +16,7 @@ const (
DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers
DefaultProvideDHTSweepEnabled = false
DefaultProvideDHTResumeEnabled = true
DefaultProvideDHTDedicatedPeriodicWorkers = 2
DefaultProvideDHTDedicatedBurstWorkers = 1
DefaultProvideDHTMaxProvideConnsPerWorker = 20
@ -86,6 +87,12 @@ type ProvideDHT struct {
// OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
// Default: DefaultProvideDHTOfflineDelay
OfflineDelay *OptionalDuration `json:",omitempty"`
// ResumeEnabled controls whether the provider resumes from its previous state on restart.
// When enabled, the provider persists its reprovide cycle state and provide queue to the datastore,
// and restores them on restart. When disabled, the provider starts fresh on each restart.
// Default: true
ResumeEnabled Flag `json:",omitempty"`
}
func ParseProvideStrategy(s string) ProvideStrategy {

View File

@ -14,6 +14,7 @@ import (
"github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
"github.com/ipfs/go-datastore/query"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/repo"
@ -36,14 +37,21 @@ import (
"go.uber.org/fx"
)
// The size of a batch that will be used for calculating average announcement
// time per CID, inside of boxo/provider.ThroughputReport
// and in 'ipfs stats provide' report.
// Used when Provide.DHT.SweepEnabled=false
const sampledBatchSize = 1000
const (
// The size of a batch that will be used for calculating average announcement
// time per CID, inside of boxo/provider.ThroughputReport
// and in 'ipfs stats provide' report.
// Used when Provide.DHT.SweepEnabled=false
sampledBatchSize = 1000
// Datastore key used to store previous reprovide strategy.
const reprovideStrategyKey = "/reprovideStrategy"
// Datastore key used to store previous reprovide strategy.
reprovideStrategyKey = "/reprovideStrategy"
// Datastore namespace prefix for provider data.
providerDatastorePrefix = "provider"
// Datastore path for the provider keystore.
keystoreDatastorePath = "keystore"
)
// Interval between reprovide queue monitoring checks for slow reprovide alerts.
// Used when Provide.DHT.SweepEnabled=true
@ -324,10 +332,10 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
Repo repo.Repo
}
sweepingReprovider := fx.Provide(func(in providerInput) (DHTProvider, *keystore.ResettableKeystore, error) {
ds := in.Repo.Datastore()
ds := namespace.Wrap(in.Repo.Datastore(), datastore.NewKey(providerDatastorePrefix))
ks, err := keystore.NewResettableKeystore(ds,
keystore.WithPrefixBits(16),
keystore.WithDatastorePath("/provider/keystore"),
keystore.WithDatastorePath(keystoreDatastorePath),
keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))),
)
if err != nil {
@ -370,6 +378,8 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
if inDht != nil {
prov, err := ddhtprovider.New(inDht,
ddhtprovider.WithKeystore(ks),
ddhtprovider.WithDatastore(ds),
ddhtprovider.WithResumeCycle(cfg.Provide.DHT.ResumeEnabled.WithDefault(config.DefaultProvideDHTResumeEnabled)),
ddhtprovider.WithReprovideInterval(reprovideInterval),
ddhtprovider.WithMaxReprovideDelay(time.Hour),
@ -403,6 +413,8 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
}
opts := []dhtprovider.Option{
dhtprovider.WithKeystore(ks),
dhtprovider.WithDatastore(ds),
dhtprovider.WithResumeCycle(cfg.Provide.DHT.ResumeEnabled.WithDefault(config.DefaultProvideDHTResumeEnabled)),
dhtprovider.WithPeerID(impl.Host().ID()),
dhtprovider.WithRouter(impl),
dhtprovider.WithMessageSender(impl.MessageSender()),
@ -576,7 +588,7 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
stats := prov.Stats()
queuedWorkers = stats.Workers.QueuedPeriodic > 0
queueSize = stats.Queues.PendingRegionReprovides
queueSize = int64(stats.Queues.PendingRegionReprovides)
// Alert if reprovide queue keeps growing and all periodic workers are busy.
// Requires consecutiveAlertsThreshold intervals of sustained growth.

View File

@ -11,6 +11,7 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [📊 Detailed statistics for Sweep provider with `ipfs provide stat`](#-detailed-statistics-for-sweep-provider-with-ipfs-provide-stat)
- [⏯️ Provider resume cycle for improved reproviding reliability](#-provider-resume-cycle-for-improved-reproviding-reliability)
- [🔔 Sweep provider slow reprovide warnings](#-sweep-provider-slow-reprovide-warnings)
- [🔧 Fixed UPnP port forwarding after router restarts](#-fixed-upnp-port-forwarding-after-router-restarts)
- [🖥️ RISC-V support with prebuilt binaries](#-risc-v-support-with-prebuilt-binaries)
@ -64,6 +65,30 @@ provider statistics instead of the default WAN DHT stats.
> [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled)).
> Legacy provider shows basic statistics without flag support.
#### ⏯️ Provider resume cycle for improved reproviding reliability
When using the sweeping provider (`Provide.DHT.SweepEnabled`), Kubo now
persists the reprovide cycle state and automatically resumes where it left off
after a restart. This brings several improvements:
- **Persistent progress**: The provider now saves its position in the reprovide
cycle to the datastore. On restart, it continues from where it stopped instead
of starting from scratch.
- **Catch-up reproviding**: If the node was offline for an extended period, all
CIDs that haven't been reprovided within the configured reprovide interval are
immediately queued for reproviding when the node starts up. This ensures
content availability is maintained even after downtime.
- **Persistent provide queue**: The provide queue is now persisted to the
datastore on shutdown. When the node restarts, queued CIDs are restored and
provided as expected, preventing loss of pending provide operations.
- **Resume control**: The resume behavior is now controlled via the
`Provide.DHT.ResumeEnabled` config option (default: `true`). If you don't want
to keep the persisted provider state from a previous run, you can set
`Provide.DHT.ResumeEnabled=false` in your config.
This feature significantly improves the reliability of content providing,
especially for nodes that experience intermittent connectivity or restarts.
#### 🔔 Sweep provider slow reprovide warnings
Kubo now monitors DHT reprovide operations when `Provide.DHT.SweepEnabled=true`

View File

@ -132,6 +132,7 @@ config file at runtime.
- [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers)
- [`Provide.DHT.Interval`](#providedhtinterval)
- [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled)
- [`Provide.DHT.ResumeEnabled`](#providedhtresumeenabled)
- [`Provide.DHT.DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers)
- [`Provide.DHT.DedicatedBurstWorkers`](#providedhtdedicatedburstworkers)
- [`Provide.DHT.MaxProvideConnsPerWorker`](#providedhtmaxprovideconnsperworker)
@ -2139,6 +2140,17 @@ gets batched by keyspace region. The keystore is periodically refreshed at each
[`Provide.Strategy`](#providestrategy) to ensure only current content remains
scheduled. This handles cases where content is unpinned or removed.
**Persistent reprovide cycle state:** When Provide Sweep is enabled, the
reprovide cycle state is persisted to the datastore by default. On restart, Kubo
automatically resumes from where it left off. If the node was offline for an
extended period, all CIDs that haven't been reprovided within the configured
[`Provide.DHT.Interval`](#providedhtinterval) are immediately queued for
reproviding. Additionally, the provide queue is persisted on shutdown and
restored on startup, ensuring no pending provide operations are lost. If you
don't want to keep the persisted provider state from a previous run, you can
disable this behavior by setting [`Provide.DHT.ResumeEnabled`](#providedhtresumeenabled)
to `false`.
> <picture>
> <source media="(prefers-color-scheme: dark)" srcset="https://github.com/user-attachments/assets/f6e06b08-7fee-490c-a681-1bf440e16e27">
> <source media="(prefers-color-scheme: light)" srcset="https://github.com/user-attachments/assets/e1662d7c-f1be-4275-a9ed-f2752fcdcabe">
@ -2163,9 +2175,42 @@ Default: `false`
Type: `flag`
#### `Provide.DHT.ResumeEnabled`
Controls whether the provider resumes from its previous state on restart. Only
applies when `Provide.DHT.SweepEnabled` is true.
When enabled (the default), the provider persists its reprovide cycle state and
provide queue to the datastore, and restores them on restart. This ensures:
- The reprovide cycle continues from where it left off instead of starting over
- Any CIDs in the provide queue during shutdown are restored and provided after
restart
- CIDs that missed their reprovide window while the node was offline are queued
for immediate reproviding
When disabled, the provider starts fresh on each restart, discarding any
previous reprovide cycle state and provide queue. On a fresh start, all CIDs
matching the [`Provide.Strategy`](#providestrategy) will be provided ASAP (as
burst provides), and then keyspace regions are reprovided according to the
regular schedule starting from the beginning of the reprovide cycle.
> [!NOTE]
> Disabling this option means the provider will provide all content matching
> your strategy on every restart (which can be resource-intensive for large
> datasets), then start from the beginning of the reprovide cycle. For nodes
> with large datasets or frequent restarts, keeping this enabled (the default)
> is recommended for better resource efficiency and more consistent reproviding
> behavior.
Default: `true`
Type: `flag`
#### `Provide.DHT.DedicatedPeriodicWorkers`
Number of workers dedicated to periodic keyspace region reprovides. Only applies when `Provide.DHT.SweepEnabled` is true.
Number of workers dedicated to periodic keyspace region reprovides. Only
applies when `Provide.DHT.SweepEnabled` is true.
Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this
number of workers will be dedicated to the periodic region reprovide only. The sum of

View File

@ -115,7 +115,7 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect

View File

@ -434,8 +434,8 @@ github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl9
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781 h1:oTzgZExvlcixPXIXO7Knojv5yYoBB5SMLUmgtNzBGfY=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 h1:xZj18PsLD157snR/BFo547jwOkGDH7jZjMEkBDOoD4Q=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=

2
go.mod
View File

@ -53,7 +53,7 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0
github.com/libp2p/go-libp2p v0.44.0
github.com/libp2p/go-libp2p-http v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32
github.com/libp2p/go-libp2p-kbucket v0.8.0
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/libp2p/go-libp2p-pubsub-router v0.6.0

4
go.sum
View File

@ -518,8 +518,8 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk
github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA=
github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc=
github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781 h1:oTzgZExvlcixPXIXO7Knojv5yYoBB5SMLUmgtNzBGfY=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 h1:xZj18PsLD157snR/BFo547jwOkGDH7jZjMEkBDOoD4Q=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=

View File

@ -3,6 +3,7 @@ package cli
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
@ -608,6 +609,124 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
})
}
// runResumeTests validates Provide.DHT.ResumeEnabled behavior for SweepingProvider.
//
// Background: The provider tracks current_time_offset = (now - cycleStart) % interval
// where cycleStart is the timestamp marking the beginning of the reprovide cycle.
// With ResumeEnabled=true, cycleStart persists in the datastore across restarts.
// With ResumeEnabled=false, cycleStart resets to 'now' on each startup.
func runResumeTests(t *testing.T, apply cfgApplier) {
	t.Helper()

	const (
		reprovideInterval = 30 * time.Second
		initialRuntime    = 10 * time.Second // Let cycle progress
		downtime          = 5 * time.Second  // Simulated offline period
		restartTime       = 2 * time.Second  // Daemon restart stabilization

		// Thresholds account for timing jitter (~2-3s margin)
		minOffsetBeforeRestart = 8 * time.Second  // Expect ~10s
		minOffsetAfterResume   = 12 * time.Second // Expect ~17s (10s + 5s + 2s)
		maxOffsetAfterReset    = 5 * time.Second  // Expect ~2s (fresh start)
	)

	// setupNode initializes and starts a daemon with sweep mode enabled and
	// the given ResumeEnabled setting. Bootstrap is cleared so the node runs
	// isolated and timing is not disturbed by network activity.
	setupNode := func(t *testing.T, resumeEnabled bool) *harness.Node {
		node := harness.NewT(t).NewNode().Init()
		apply(node) // Sets Provide.DHT.SweepEnabled=true
		node.SetIPFSConfig("Provide.DHT.ResumeEnabled", resumeEnabled)
		node.SetIPFSConfig("Provide.DHT.Interval", reprovideInterval.String())
		node.SetIPFSConfig("Bootstrap", []string{})
		node.StartDaemon()
		return node
	}

	// currentOffset reads current_time_offset from 'ipfs provide stat --enc=json'.
	currentOffset := func(t *testing.T, node *harness.Node) time.Duration {
		res := node.IPFS("provide", "stat", "--enc=json")
		offset, _, err := parseProvideStatJSON(res.Stdout.String())
		require.NoError(t, err)
		return offset
	}

	// runCycleThenRestart is the sequence shared by both subtests: add unique
	// content, let the cycle progress, verify it moved forward, then restart
	// the daemon after a simulated offline period and return the post-restart
	// offset for the subtest-specific assertion.
	runCycleThenRestart := func(t *testing.T, node *harness.Node, label string) time.Duration {
		for i := 0; i < 10; i++ {
			node.IPFSAddStr(fmt.Sprintf("%s-%d-%d", label, i, time.Now().UnixNano()))
		}
		time.Sleep(initialRuntime)

		require.Greater(t, currentOffset(t, node), minOffsetBeforeRestart,
			"cycle should have progressed")

		node.StopDaemon()
		time.Sleep(downtime)
		node.StartDaemon()
		time.Sleep(restartTime)

		return currentOffset(t, node)
	}

	t.Run("preserves cycle state across restart", func(t *testing.T) {
		t.Parallel()
		node := setupNode(t, true)
		defer node.StopDaemon()

		offsetAfterRestart := runCycleThenRestart(t, node, "resume-test")
		// With resume enabled, the persisted cycleStart makes the offset
		// include the runtime before restart plus the downtime.
		assert.GreaterOrEqual(t, offsetAfterRestart, minOffsetAfterResume,
			"offset should account for downtime")
	})

	t.Run("resets cycle when disabled", func(t *testing.T) {
		t.Parallel()
		node := setupNode(t, false)
		defer node.StopDaemon()

		offsetAfterRestart := runCycleThenRestart(t, node, "no-resume")
		// With resume disabled, cycleStart is reset on startup, so the offset
		// reflects only the short post-restart runtime.
		assert.Less(t, offsetAfterRestart, maxOffsetAfterReset,
			"offset should reset to near zero")
	})
}
// provideStatJSON mirrors the subset of 'ipfs provide stat --enc=json'
// output these tests consume: the sweep cycle timing and the next
// scheduled reprovide prefix.
type provideStatJSON struct {
	Sweep struct {
		Timing struct {
			CurrentTimeOffset int64 `json:"current_time_offset"` // nanoseconds
		} `json:"timing"`
		Schedule struct {
			NextReprovidePrefix string `json:"next_reprovide_prefix"`
		} `json:"schedule"`
	} `json:"Sweep"`
}

// parseProvideStatJSON extracts timing and schedule information from
// the JSON output of 'ipfs provide stat --enc=json'.
// Note: prefix is unused in current tests but kept for potential future use.
func parseProvideStatJSON(output string) (offset time.Duration, prefix string, err error) {
	var parsed provideStatJSON
	if unmarshalErr := json.Unmarshal([]byte(output), &parsed); unmarshalErr != nil {
		return 0, "", unmarshalErr
	}
	return time.Duration(parsed.Sweep.Timing.CurrentTimeOffset),
		parsed.Sweep.Schedule.NextReprovidePrefix,
		nil
}
func TestProvider(t *testing.T) {
t.Parallel()
@ -637,6 +756,11 @@ func TestProvider(t *testing.T) {
t.Run(v.name, func(t *testing.T) {
// t.Parallel()
runProviderSuite(t, v.reprovide, v.apply)
// Resume tests only apply to SweepingProvider
if v.name == "SweepingProvider" {
runResumeTests(t, v.apply)
}
})
}
}

View File

@ -184,7 +184,7 @@ require (
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p v0.44.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect

View File

@ -468,8 +468,8 @@ github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqA
github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781 h1:oTzgZExvlcixPXIXO7Knojv5yYoBB5SMLUmgtNzBGfY=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251028150720-c3f8d33dc781/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 h1:xZj18PsLD157snR/BFo547jwOkGDH7jZjMEkBDOoD4Q=
github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=