Mirror of https://github.com/ipfs/kubo.git (synced 2026-02-21 10:27:46 +08:00)
feat: enable DHT Provide Sweep by default (#10955)
Co-authored-by: Marcin Rataj <lidel@lidel.org>
Co-authored-by: Andrew Gillis <11790789+gammazero@users.noreply.github.com>
parent 93f8897d7c
commit 702c63b6db
@@ -15,7 +15,7 @@ const (
 	// DHT provider defaults
 	DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
 	DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers
-	DefaultProvideDHTSweepEnabled = false
+	DefaultProvideDHTSweepEnabled = true
 	DefaultProvideDHTResumeEnabled = true
 	DefaultProvideDHTDedicatedPeriodicWorkers = 2
 	DefaultProvideDHTDedicatedBurstWorkers = 1
@@ -64,7 +64,7 @@ type ProvideDHT struct {
 	MaxWorkers *OptionalInteger `json:",omitempty"`

 	// SweepEnabled activates the sweeping reprovider system which spreads
-	// reprovide operations over time. This will become the default in a future release.
+	// reprovide operations over time.
 	// Default: DefaultProvideDHTSweepEnabled
 	SweepEnabled Flag `json:",omitempty"`

@@ -240,14 +240,27 @@ func (tp *TestSuite) TestRoutingProvide(t *testing.T) {
 		t.Fatal(err)
 	}

-	out, err = apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1))
-	if err != nil {
-		t.Fatal(err)
+	maxAttempts := 5
+	success := false
+	for range maxAttempts {
+		// We may need to try again as Provide() doesn't block until the CID is
+		// actually provided.
+		out, err = apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1))
+		if err != nil {
+			t.Fatal(err)
+		}
+		provider := <-out
+
+		if provider.ID.String() == self0.ID().String() {
+			success = true
+			break
+		}
+		if len(provider.ID.String()) > 0 {
+			t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String())
+		}
+		time.Sleep(time.Second)
 	}

-	provider := <-out
-
-	if provider.ID.String() != self0.ID().String() {
-		t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String())
+	if !success {
+		t.Errorf("missing provider after %d attempts", maxAttempts)
 	}
 }
@@ -116,6 +116,7 @@ type DHTProvider interface {
 	// `OfflineDelay`). The schedule depends on the network size, hence recent
 	// network connectivity is essential.
+	RefreshSchedule() error
 	Close() error
 }

 var (
@@ -134,6 +135,7 @@ func (r *NoopProvider) StartProviding(bool, ...mh.Multihash) error { return nil
 func (r *NoopProvider) ProvideOnce(...mh.Multihash) error { return nil }
 func (r *NoopProvider) Clear() int { return 0 }
+func (r *NoopProvider) RefreshSchedule() error { return nil }
 func (r *NoopProvider) Close() error { return nil }

 // LegacyProvider is a wrapper around the boxo/provider.System that implements
 // the DHTProvider interface. This provider manages reprovides using a burst
@@ -523,8 +525,41 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
 			case <-ctx.Done():
 				return ctx.Err()
 			}
-			// Keystore data isn't purged, on close, but it will be overwritten
-			// when the node starts again.
+			// Keystore will be closed by ensureProviderClosesBeforeKeystore hook
+			// to guarantee provider closes before keystore.
 			return nil
 		},
 	})
 })
+
+// ensureProviderClosesBeforeKeystore manages the shutdown order between
+// provider and keystore to prevent race conditions.
+//
+// The provider's worker goroutines may call keystore methods during their
+// operation. If keystore closes while these operations are in-flight, we get
+// "keystore is closed" errors. By closing the provider first, we ensure all
+// worker goroutines exit and complete any pending keystore operations before
+// the keystore itself closes.
+type providerKeystoreShutdownInput struct {
+	fx.In
+	Provider DHTProvider
+	Keystore *keystore.ResettableKeystore
+}
+ensureProviderClosesBeforeKeystore := fx.Invoke(func(lc fx.Lifecycle, in providerKeystoreShutdownInput) {
+	// Skip for NoopProvider
+	if _, ok := in.Provider.(*NoopProvider); ok {
+		return
+	}
+
+	lc.Append(fx.Hook{
+		OnStop: func(ctx context.Context) error {
+			// Close provider first - waits for all worker goroutines to exit.
+			// This ensures no code can access keystore after this returns.
+			if err := in.Provider.Close(); err != nil {
+				logger.Errorw("error closing provider during shutdown", "error", err)
+			}

+			// Close keystore - safe now, provider is fully shut down
+			return in.Keystore.Close()
+		},
+	})
+})
@@ -650,6 +685,7 @@ See docs: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxw
 	return fx.Options(
 		sweepingReprovider,
 		initKeystore,
+		ensureProviderClosesBeforeKeystore,
 		reprovideAlert,
 	)
 }
@@ -59,6 +59,9 @@ A new experimental DHT provider is available as an alternative to both the defau
 
 **Monitoring and debugging:** Legacy mode (`SweepEnabled=false`) tracks `provider_reprovider_provide_count` and `provider_reprovider_reprovide_count`, while sweep mode (`SweepEnabled=true`) tracks `total_provide_count_total`. Enable debug logging with `GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug` to see detailed logs from either system.
 
+> [!IMPORTANT]
+> The metric `total_provide_count_total` was renamed to `provider_provides_total` in Kubo v0.39 to follow OpenTelemetry naming conventions. If you have dashboards or alerts monitoring this metric, update them accordingly.
+
 > [!NOTE]
 > This feature is experimental and opt-in. In the future, it will become the default and replace the legacy system. Some commands like `ipfs stats provide` and `ipfs routing provide` are not yet available with sweep mode. Run `ipfs provide --help` for alternatives.
 
@@ -68,6 +71,9 @@ For configuration details, see [`Provide.DHT`](https://github.com/ipfs/kubo/blob
 
 Kubo now exposes DHT metrics from [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/), including `total_provide_count_total` for sweep provider operations and RPC metrics prefixed with `rpc_inbound_` and `rpc_outbound_` for DHT message traffic. See [Kubo metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md) for details.
 
+> [!IMPORTANT]
+> The metric `total_provide_count_total` was renamed to `provider_provides_total` in Kubo v0.39 to follow OpenTelemetry naming conventions. If you have dashboards or alerts monitoring this metric, update them accordingly.
+
 #### 🚨 Improved gateway error pages with diagnostic tools
 
 Gateway error pages now provide more actionable information during content retrieval failures. When a 504 Gateway Timeout occurs, users see detailed retrieval state information including which phase failed and a sample of providers that were attempted:
@@ -10,11 +10,14 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
 
 - [Overview](#overview)
 - [🔦 Highlights](#-highlights)
+  - [🎯 Amino DHT Sweep provider is now the default](#-amino-dht-sweep-provider-is-now-the-default)
   - [📊 Detailed statistics for Sweep provider with `ipfs provide stat`](#-detailed-statistics-for-sweep-provider-with-ipfs-provide-stat)
+  - [⏯️ Provider resume cycle for improved reproviding reliability](#provider-resume-cycle-for-improved-reproviding-reliability)
   - [🔔 Sweep provider slow reprovide warnings](#-sweep-provider-slow-reprovide-warnings)
+  - [📊 Metric rename: `provider_provides_total`](#-metric-rename-provider_provides_total)
   - [🔧 Fixed UPnP port forwarding after router restarts](#-fixed-upnp-port-forwarding-after-router-restarts)
   - [🖥️ RISC-V support with prebuilt binaries](#️-risc-v-support-with-prebuilt-binaries)
   - [🚦 Gateway range request limits for CDN compatibility](#-gateway-range-request-limits-for-cdn-compatibility)
   - [🪦 Deprecated `go-ipfs` name no longer published](#-deprecated-go-ipfs-name-no-longer-published)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
@@ -22,77 +25,54 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
 
 ### Overview
 
+Kubo 0.39.0 graduates the experimental sweep provider to default, bringing efficient content announcement to all nodes. This release adds detailed provider statistics, automatic state persistence for reliable reproviding after restarts, and proactive monitoring alerts for identifying issues early. It also includes important fixes for UPnP port forwarding, RISC-V prebuilt binaries, and finalizes the deprecation of the legacy go-ipfs name.
 
 ### 🔦 Highlights
 
-#### 🚦 Gateway range request limits for CDN compatibility
+#### 🎯 Amino DHT Sweep provider is now the default
 
-The new [`Gateway.MaxRangeRequestFileSize`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) configuration protects against CDN bugs where range requests over a certain size are silently ignored and the entire file is returned instead ([boxo#856](https://github.com/ipfs/boxo/issues/856#issuecomment-2786431369)). This causes unexpected bandwidth costs for both gateway operators and clients who only wanted a small byte range.
+The Amino DHT Sweep provider system, introduced as experimental in v0.38, is now enabled by default (`Provide.DHT.SweepEnabled=true`).
+
+**What this means:** All nodes now benefit from efficient keyspace-sweeping content announcements that reduce memory overhead and create predictable network patterns, especially for nodes providing large content collections.
+
+**Migration:** The transition is automatic on upgrade. Your existing configuration is preserved:
+
+- If you explicitly set `Provide.DHT.SweepEnabled=false` in v0.38, you'll continue using the legacy provider
+- If you were using the default settings, you'll automatically get the sweep provider
+- To opt out and return to legacy behavior: `ipfs config --json Provide.DHT.SweepEnabled false`
+
+**New features available with sweep mode:**
+
+- Detailed statistics via `ipfs provide stat` ([see below](#-detailed-statistics-for-sweep-provider-with-ipfs-provide-stat))
+- Automatic resume after restarts with persistent state ([see below](#provider-resume-cycle-for-improved-reproviding-reliability))
+- Proactive alerts when reproviding falls behind ([see below](#-sweep-provider-slow-reprovide-warnings))
+- Better metrics for monitoring (`provider_provides_total`) ([see below](#-metric-rename-provider_provides_total))
+
+For background on the sweep provider design and motivations, see [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled) and [ipshipyard.com#8](https://github.com/ipshipyard/ipshipyard.com/pull/8).
 
-Set this to your CDN's range request limit (e.g., `"5GiB"` for Cloudflare's default plan) to return 501 Not Implemented for oversized range requests, with an error message suggesting verifiable block requests as an alternative.
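To make the opt-out concrete, a minimal CLI sketch (the `ipfs config --json` call is the one quoted in the migration notes above; reading a key with plain `ipfs config` and restarting via `ipfs shutdown` are standard kubo commands):

```sh
# Inspect the current setting (prints the configured value, if any)
ipfs config Provide.DHT.SweepEnabled

# Opt out and return to the legacy provider
ipfs config --json Provide.DHT.SweepEnabled false

# Restart the daemon so the change takes effect
ipfs shutdown
ipfs daemon
```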
 #### 📊 Detailed statistics for Sweep provider with `ipfs provide stat`
 
-The experimental Sweep provider system ([introduced in
-v0.38](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.38.md#-experimental-sweeping-dht-provider))
-now has detailed statistics available through `ipfs provide stat`.
+The Sweep provider system now exposes detailed statistics through `ipfs provide stat`, helping you monitor provider health and troubleshoot issues.
 
-These statistics help you monitor provider health and troubleshoot issues,
-especially useful for nodes providing large content collections. You can quickly
-identify bottlenecks like queue backlog, worker saturation, or connectivity
-problems that might prevent content from being announced to the DHT.
+Run `ipfs provide stat` for a quick summary, or use `--all` to see complete metrics including connectivity status, queue sizes, reprovide schedules, network statistics, operation rates, and worker utilization. For real-time monitoring, use `watch ipfs provide stat --all --compact` to observe changes in a 2-column layout. Individual sections can be displayed with flags like `--network`, `--operations`, or `--workers`.
 
-**Default behavior:** Displays a brief summary showing queue sizes, scheduled
-CIDs/regions, average record holders, ongoing/total provides, and worker status
-when resources are constrained.
+For Dual DHT configurations, use `--lan` to view LAN DHT statistics instead of the default WAN DHT stats.
 
-**Detailed statistics with `--all`:** View complete metrics organized into sections:
-
-- **Connectivity**: DHT connection status
-- **Queues**: Pending provide and reprovide operations
-- **Schedule**: CIDs/regions scheduled for reprovide
-- **Timings**: Uptime, reprovide cycle information
-- **Network**: Peer statistics, keyspace region sizes
-- **Operations**: Ongoing and past provides, rates, errors
-- **Workers**: Worker pool utilization and availability
-
-**Real-time monitoring:** For continuous monitoring, run
-`watch ipfs provide stat --all --compact` to see detailed statistics refreshed
-in a 2-column layout. This lets you observe provide rates, queue sizes, and
-worker availability in real-time. Individual sections can be displayed using
-flags like `--network`, `--operations`, or `--workers`, and multiple flags can
-be combined for custom views.
-
-**Dual DHT support:** For Dual DHT configurations, use `--lan` to view LAN DHT
-provider statistics instead of the default WAN DHT stats.
+For more information, run `ipfs provide stat --help` or see the [Provide Stats documentation](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md).
 
 > [!NOTE]
 > These statistics are only available when using the Sweep provider system
 > (enabled via
 > [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled)).
-> Legacy provider shows basic statistics without flag support.
+> Legacy provider (when `Provide.DHT.SweepEnabled=false`) shows basic statistics without flag support.
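A usage sketch combining the flags named above (output omitted; it varies per node):

```sh
# Brief summary
ipfs provide stat

# Complete metrics, refreshed in a compact two-column layout
watch ipfs provide stat --all --compact

# Combine individual sections into a custom view
ipfs provide stat --network --operations --workers

# LAN DHT statistics on Dual DHT configurations
ipfs provide stat --lan
```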
 #### ⏯️ Provider resume cycle for improved reproviding reliability
 
-When using the sweeping provider (`Provide.DHT.SweepEnabled`), Kubo now
-persists the reprovide cycle state and automatically resumes where it left off
-after a restart. This brings several improvements:
+The Sweep provider now persists the reprovide cycle state and automatically resumes where it left off after a restart. This brings several improvements:
 
-- **Persistent progress**: The provider now saves its position in the reprovide
-  cycle to the datastore. On restart, it continues from where it stopped instead
-  of starting from scratch.
-- **Catch-up reproviding**: If the node was offline for an extended period, all
-  CIDs that haven't been reprovided within the configured reprovide interval are
-  immediately queued for reproviding when the node starts up. This ensures
-  content availability is maintained even after downtime.
-- **Persistent provide queue**: The provide queue is now persisted to the
-  datastore on shutdown. When the node restarts, queued CIDs are restored and
-  provided as expected, preventing loss of pending provide operations.
-- **Resume control**: The resume behavior is now controlled via the
-  `Provide.DHT.ResumeEnabled` config option (default: `true`). If you don't want
-  to keep the persisted provider state from a previous run, you can set
-  `Provide.DHT.ResumeEnabled=false` in your config.
+- **Persistent progress**: The provider saves its position in the reprovide cycle to the datastore. On restart, it continues from where it stopped instead of starting from scratch.
+- **Catch-up reproviding**: If the node was offline for an extended period, all CIDs that haven't been reprovided within the configured reprovide interval are immediately queued for reproviding when the node starts up. This ensures content availability is maintained even after downtime.
+- **Persistent provide queue**: The provide queue is persisted to the datastore on shutdown. When the node restarts, queued CIDs are restored and provided as expected, preventing loss of pending provide operations.
+- **Resume control**: The resume behavior is controlled via [`Provide.DHT.ResumeEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtresumeenabled) (default: `true`). Set to `false` if you don't want to keep the persisted provider state from a previous run.
 
-This feature significantly improves the reliability of content providing,
-especially for nodes that experience intermittent connectivity or restarts.
+This feature improves reliability for nodes that experience intermittent connectivity or restarts.
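Toggling the resume behavior from the CLI is a one-liner (a sketch; key and default as documented above, daemon restart required as with other config changes):

```sh
# Discard persisted provider state and start the next cycle from scratch
ipfs config --json Provide.DHT.ResumeEnabled false
```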
 #### 🔔 Sweep provider slow reprovide warnings
 
@@ -110,6 +90,12 @@ The alert polls every 15 minutes (to avoid alert fatigue while catching
 persistent issues) and only triggers after sustained growth across multiple
 intervals. The legacy provider is unaffected by this change.
 
+#### 📊 Metric rename: `provider_provides_total`
+
+The Amino DHT Sweep provider metric has been renamed from `total_provide_count_total` to `provider_provides_total` to follow OpenTelemetry naming conventions and maintain consistency with other kad-dht metrics (which use dot notation like `rpc.inbound.messages`, `rpc.outbound.requests`, etc.).
+
+**Migration:** If you have Prometheus queries, dashboards, or alerts monitoring the old `total_provide_count_total` metric, update them to use `provider_provides_total` instead. This affects all nodes using sweep mode, which is now the default in v0.39 (previously opt-in experimental in v0.38).
+
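One way to verify the rename on a running node is to grep the daemon's Prometheus endpoint (assuming the default RPC API address `127.0.0.1:5001`; the metrics path is the one documented in docs/metrics.md):

```sh
# Present on v0.39+ with sweep mode enabled
curl -s http://127.0.0.1:5001/debug/metrics/prometheus | grep provider_provides_total

# Should return nothing after the upgrade
curl -s http://127.0.0.1:5001/debug/metrics/prometheus | grep total_provide_count_total
```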
 #### 🔧 Fixed UPnP port forwarding after router restarts
 
 Kubo now automatically recovers UPnP port mappings when routers restart or
@@ -136,26 +122,27 @@ using UPnP for NAT traversal.
 
 #### 🖥️ RISC-V support with prebuilt binaries
 
-Kubo now provides official `linux-riscv64` prebuilt binaries with every release,
-bringing IPFS to [RISC-V](https://en.wikipedia.org/wiki/RISC-V) open hardware.
+Kubo provides official `linux-riscv64` prebuilt binaries, bringing IPFS to [RISC-V](https://en.wikipedia.org/wiki/RISC-V) open hardware.
 
-As RISC-V single-board computers and embedded systems become more accessible,
-it's good to see the distributed web supported on open hardware architectures -
-a natural pairing of open technologies.
+As RISC-V single-board computers and embedded systems become more accessible, the distributed web is now supported on open hardware architectures - a natural pairing of open technologies.
 
-Download from <https://dist.ipfs.tech/kubo/> or
-<https://github.com/ipfs/kubo/releases> and look for the `linux-riscv64` archive.
+Download from <https://dist.ipfs.tech/kubo/> or <https://github.com/ipfs/kubo/releases> and look for the `linux-riscv64` archive.
 
+#### 🚦 Gateway range request limits for CDN compatibility
+
+The new [`Gateway.MaxRangeRequestFileSize`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) configuration protects against CDN range request limitations that cause bandwidth overcharges on deserialized responses. Some CDNs convert range requests over large files into full file downloads, causing clients requesting small byte ranges to unknowingly download entire multi-gigabyte files.
+
+This only impacts deserialized responses. Clients using verifiable block requests (`application/vnd.ipld.raw`) are not affected. See the [configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) for details.
+
 #### 🪦 Deprecated `go-ipfs` name no longer published
 
-The `go-ipfs` name was deprecated in 2022 and renamed to `kubo`. Starting with this release, we have stopped publishing Docker images and distribution binaries under the old `go-ipfs` name.
+The `go-ipfs` name was deprecated in 2022 and renamed to `kubo`. Starting with this release, the legacy Docker image name has been replaced with a stub that displays an error message directing users to switch to `ipfs/kubo`.
 
-Existing users should switch to:
+**Docker images:** The `ipfs/go-ipfs` image tags now contain only a stub script that exits with an error, instructing users to update their Docker configurations to use [`ipfs/kubo`](https://hub.docker.com/r/ipfs/kubo) instead. This ensures users are aware of the deprecation while allowing existing automation to fail explicitly rather than silently using outdated images.
 
-- Docker: `ipfs/kubo` image (instead of `ipfs/go-ipfs`)
-- Binaries: download from <https://dist.ipfs.tech/kubo/> or <https://github.com/ipfs/kubo/releases>
+**Distribution binaries:** Download Kubo from <https://dist.ipfs.tech/kubo/> or <https://github.com/ipfs/kubo/releases>. The legacy `go-ipfs` distribution path should no longer be used.
 
-For Docker users, the legacy `ipfs/go-ipfs` image name now shows a deprecation notice directing you to `ipfs/kubo`.
+All users should migrate to the `kubo` name in their scripts and configurations.
 
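A migration sketch for Docker users (the grep target is wherever your own scripts and compose files live; the paths here are hypothetical):

```sh
# Find lingering references to the deprecated image name
grep -rn "ipfs/go-ipfs" ./deploy ./docker-compose.yml

# Switch to the maintained image
docker pull ipfs/kubo:latest
```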
 ### 📦️ Important dependency updates
 
@@ -1162,11 +1162,20 @@ Type: `optionalDuration`
 
 ### `Gateway.MaxRangeRequestFileSize`
 
-Maximum file size for HTTP range requests. Range requests for files larger than this limit return 501 Not Implemented.
+Maximum file size for HTTP range requests on deserialized responses. Range requests for files larger than this limit return 501 Not Implemented.
 
-Protects against CDN bugs where range requests are silently ignored and the entire file is returned instead. For example, Cloudflare's default plan returns the full file for range requests over 5GiB, causing unexpected bandwidth costs for both gateway operators and clients who only wanted a small byte range.
+**Why this exists:**
 
-Set this to your CDN's range request limit (e.g., `"5GiB"` for Cloudflare's default plan). The error response suggests using verifiable block requests (application/vnd.ipld.raw) as an alternative.
+Some CDNs like Cloudflare intercept HTTP range requests and convert them to full file downloads when files exceed their cache bucket limits. Cloudflare's default plan only caches range requests for files up to 5GiB. Files larger than this receive HTTP 200 with the entire file instead of HTTP 206 with the requested byte range. A client requesting 1MB from a 40GiB file would unknowingly download all 40GiB, causing bandwidth overcharges for the gateway operator, unexpected data costs for the client, and potential browser crashes.
+
+This only affects deserialized responses. Clients fetching verifiable blocks as `application/vnd.ipld.raw` are not impacted because they work with small chunks that stay well below CDN cache limits.
+
+**How to use:**
+
+Set this to your CDN's range request cache limit (e.g., `"5GiB"` for Cloudflare's default plan). The gateway returns 501 Not Implemented for range requests over files larger than this limit, with an error message suggesting verifiable block requests as an alternative.
+
+> [!NOTE]
+> Cloudflare users running an open gateway that serves deserialized responses should deploy additional protection via Cloudflare Snippets (requires Enterprise plan). The Kubo configuration alone is not sufficient because Cloudflare has already intercepted and cached the response by the time it reaches your origin. See [boxo#856](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976) for a snippet that aborts HTTP 200 responses when Content-Length exceeds the limit.
 
 Default: `0` (no limit)
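A configuration sketch for a gateway fronted by Cloudflare's default plan (the `5GiB` value is the example limit from this section; the plain `ipfs config` string syntax is an assumption, verify against your kubo version):

```sh
# Cap range requests on deserialized responses at the CDN's cache limit
ipfs config Gateway.MaxRangeRequestFileSize 5GiB

# Revert to the default (0 = no limit)
ipfs config Gateway.MaxRangeRequestFileSize 0
```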
@@ -2181,10 +2190,9 @@ to `false`.
 You can compare the effectiveness of sweep mode vs legacy mode by monitoring the appropriate metrics (see [Monitoring Provide Operations](#monitoring-provide-operations) above).
 
 > [!NOTE]
-> This feature is opt-in for now, but will become the default in a future release.
-> Eventually, this configuration flag will be removed once the feature is stable.
+> This is the default provider system as of Kubo v0.39. To use the legacy provider instead, set `Provide.DHT.SweepEnabled=false`.
 
-Default: `false`
+Default: `true`
 
 Type: `flag`
 
@@ -115,7 +115,7 @@ require (
 	github.com/libp2p/go-doh-resolver v0.5.0 // indirect
 	github.com/libp2p/go-flow-metrics v0.3.0 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
-	github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 // indirect
+	github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb // indirect
 	github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
 	github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect
 	github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
@@ -171,7 +171,7 @@ require (
 	github.com/pion/webrtc/v4 v4.1.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/polydawn/refmt v0.89.0 // indirect
-	github.com/probe-lab/go-libdht v0.3.0 // indirect
+	github.com/probe-lab/go-libdht v0.4.0 // indirect
 	github.com/prometheus/client_golang v1.23.2 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
 	github.com/prometheus/common v0.66.1 // indirect
@@ -430,8 +430,8 @@ github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl9
 github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
 github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
 github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
-github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 h1:xZj18PsLD157snR/BFo547jwOkGDH7jZjMEkBDOoD4Q=
-github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
+github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb h1:jOWsCSRZKnRgocz4Ocu25Yigh5ZUkar2zWt/bzBh43Q=
+github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb/go.mod h1:WIysu8hNWQN8t73dKyTNqiZdcYKRrGFl4wjzX4Gz6pQ=
 github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
 github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
 github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
@@ -630,8 +630,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
 github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
 github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
 github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
-github.com/probe-lab/go-libdht v0.3.0 h1:Q3ZXK8wCjZvgeHSTtRrppXobXY/KHPLZJfc+cdTTyqA=
-github.com/probe-lab/go-libdht v0.3.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
+github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo=
+github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
 github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
@@ -59,7 +59,7 @@ Metrics for the legacy provider system when `Provide.DHT.SweepEnabled=false`:
 
 Metrics for the DHT provider system when `Provide.DHT.SweepEnabled=true`:
 
-- `total_provide_count_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)
+- `provider_provides_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)
 
 > [!NOTE]
 > These metrics are exposed by [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/). You can enable debug logging for DHT provider activity with `GOLOG_LOG_LEVEL=dht/provider=debug`.
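The log-level variable above drops straight into a daemon invocation, for example:

```sh
# Surface DHT provider activity at debug level
GOLOG_LOG_LEVEL=dht/provider=debug ipfs daemon
```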
go.mod (4 changed lines)
@@ -53,7 +53,7 @@ require (
 	github.com/libp2p/go-doh-resolver v0.5.0
 	github.com/libp2p/go-libp2p v0.45.0
 	github.com/libp2p/go-libp2p-http v0.5.0
-	github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32
+	github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb
 	github.com/libp2p/go-libp2p-kbucket v0.8.0
 	github.com/libp2p/go-libp2p-pubsub v0.14.2
 	github.com/libp2p/go-libp2p-pubsub-router v0.6.0
@@ -69,7 +69,7 @@ require (
 	github.com/multiformats/go-multihash v0.2.3
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
-	github.com/probe-lab/go-libdht v0.3.0
+	github.com/probe-lab/go-libdht v0.4.0
 	github.com/prometheus/client_golang v1.23.2
 	github.com/stretchr/testify v1.11.1
 	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
go.sum (8 changed lines)
@@ -514,8 +514,8 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk
 github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA=
 github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc=
 github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg=
-github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 h1:xZj18PsLD157snR/BFo547jwOkGDH7jZjMEkBDOoD4Q=
-github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
+github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb h1:jOWsCSRZKnRgocz4Ocu25Yigh5ZUkar2zWt/bzBh43Q=
+github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb/go.mod h1:WIysu8hNWQN8t73dKyTNqiZdcYKRrGFl4wjzX4Gz6pQ=
 github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
 github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
 github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
@@ -732,8 +732,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
 github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
 github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
 github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
-github.com/probe-lab/go-libdht v0.3.0 h1:Q3ZXK8wCjZvgeHSTtRrppXobXY/KHPLZJfc+cdTTyqA=
-github.com/probe-lab/go-libdht v0.3.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
+github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo=
+github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -72,9 +72,9 @@ func TestRoutingV1Proxy(t *testing.T) {
 
 		cidStr := nodes[0].IPFSAddStr(string(random.Bytes(1000)))
-		// Reprovide as initialProviderDelay still ongoing
-		res := nodes[0].IPFS("routing", "reprovide")
-		require.NoError(t, res.Err)
-		res = nodes[1].IPFS("routing", "findprovs", cidStr)
+		waitUntilProvidesComplete(t, nodes[0])
+
+		res := nodes[1].IPFS("routing", "findprovs", cidStr)
 		assert.Equal(t, nodes[0].PeerID().String(), res.Stdout.Trimmed())
 	})
 
@@ -14,7 +14,6 @@ import (
 	"github.com/ipfs/kubo/test/cli/harness"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 func TestRoutingV1Server(t *testing.T) {
@@ -39,9 +38,7 @@ func TestRoutingV1Server(t *testing.T) {
 	text := "hello world " + uuid.New().String()
 	cidStr := nodes[2].IPFSAddStr(text)
 	_ = nodes[3].IPFSAddStr(text)
-	// Reprovide as initialProviderDelay still ongoing
-	res := nodes[3].IPFS("routing", "reprovide")
-	require.NoError(t, res.Err)
+	waitUntilProvidesComplete(t, nodes[3])
 
 	cid, err := cid.Decode(cidStr)
 	assert.NoError(t, err)
@@ -17,6 +17,8 @@ func TestDHTOptimisticProvide(t *testing.T) {
 
 	nodes[0].UpdateConfig(func(cfg *config.Config) {
 		cfg.Experimental.OptimisticProvide = true
+		// Optimistic provide only works with the legacy provider.
+		cfg.Provide.DHT.SweepEnabled = config.False
 	})
 
 	nodes.StartDaemons().Connect()
@@ -2,7 +2,10 @@ package cli
 
 import (
 	"fmt"
+	"strconv"
+	"strings"
 	"testing"
+	"time"
 
 	"github.com/ipfs/kubo/test/cli/harness"
 	"github.com/ipfs/kubo/test/cli/testutils"
@@ -10,6 +13,33 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func waitUntilProvidesComplete(t *testing.T, n *harness.Node) {
+	getCidsCount := func(line string) int {
+		trimmed := strings.TrimSpace(line)
+		countStr := strings.SplitN(trimmed, " ", 2)[0]
+		count, err := strconv.Atoi(countStr)
+		require.NoError(t, err)
+		return count
+	}
+
+	queuedProvides, ongoingProvides := true, true
+	for queuedProvides || ongoingProvides {
+		res := n.IPFS("provide", "stat", "-a")
+		require.NoError(t, res.Err)
+		for _, line := range res.Stdout.Lines() {
+			if trimmed, ok := strings.CutPrefix(line, " Provide queue:"); ok {
+				provideQueueSize := getCidsCount(trimmed)
+				queuedProvides = provideQueueSize > 0
+			}
+			if trimmed, ok := strings.CutPrefix(line, " Ongoing provides:"); ok {
+				ongoingProvideCount := getCidsCount(trimmed)
+				ongoingProvides = ongoingProvideCount > 0
+			}
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
+
 func testRoutingDHT(t *testing.T, enablePubsub bool) {
 	t.Run(fmt.Sprintf("enablePubSub=%v", enablePubsub), func(t *testing.T) {
 		t.Parallel()
@@ -84,10 +114,8 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) {
 	t.Run("ipfs routing findprovs", func(t *testing.T) {
 		t.Parallel()
 		hash := nodes[3].IPFSAddStr("some stuff")
-		// Reprovide as initialProviderDelay still ongoing
-		res := nodes[3].IPFS("routing", "reprovide")
-		require.NoError(t, res.Err)
-		res = nodes[4].IPFS("routing", "findprovs", hash)
+		waitUntilProvidesComplete(t, nodes[3])
+		res := nodes[4].IPFS("routing", "findprovs", hash)
 		assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed())
 	})
 
@@ -184,7 +184,7 @@ require (
 	github.com/libp2p/go-flow-metrics v0.3.0 // indirect
 	github.com/libp2p/go-libp2p v0.45.0 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
-	github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 // indirect
+	github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb // indirect
 	github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
 	github.com/libp2p/go-libp2p-record v0.3.1 // indirect
 	github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
@@ -464,8 +464,8 @@ github.com/libp2p/go-libp2p v0.45.0 h1:Pdhr2HsFXaYjtfiNcBP4CcRUONvbMFdH3puM9vV4T
 github.com/libp2p/go-libp2p v0.45.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
 github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
 github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
-github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32 h1:xZj18PsLD157snR/BFo547jwOkGDH7jZjMEkBDOoD4Q=
-github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251025120456-f33906fd2f32/go.mod h1:aHMTg23iseX9grGSfA5gFUzLrqzmYbA8PqgGPqM8VkI=
+github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb h1:jOWsCSRZKnRgocz4Ocu25Yigh5ZUkar2zWt/bzBh43Q=
+github.com/libp2p/go-libp2p-kad-dht v0.35.2-0.20251112013111-6d2d861e0abb/go.mod h1:WIysu8hNWQN8t73dKyTNqiZdcYKRrGFl4wjzX4Gz6pQ=
 github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
 github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
 github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
@@ -93,8 +93,8 @@ EOF
 	test_cmp expected actual
 '
 
-test_expect_failure "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" '
-	ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets
+test_expect_success "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" '
+	test_expect_code 1 ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets
 '
 
 }
@@ -250,6 +250,5 @@ process_resident_memory_bytes
 process_start_time_seconds
 process_virtual_memory_bytes
 process_virtual_memory_max_bytes
-provider_reprovider_provide_count
-provider_reprovider_reprovide_count
+provider_provides_total
 target_info