Merge pull request #10978 from ipfs/release-v0.38.0

chore: release v0.38.0
This commit is contained in:
Marcin Rataj 2025-10-01 20:13:59 +02:00 committed by GitHub
commit 34debcbcb4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
143 changed files with 7608 additions and 1979 deletions

17
.github/build-platforms.yml vendored Normal file
View File

@ -0,0 +1,17 @@
# Build platforms configuration for Kubo
# Matches https://github.com/ipfs/distributions/blob/master/dists/kubo/build_matrix
# plus linux-riscv64 for emerging architecture support
#
# The Go compiler handles FUSE support automatically via build tags.
# Platforms are simply listed - no need to specify FUSE capability.
#
# NOTE(review): this file is not only parsed as YAML — the Makefile target
# `supported`, the build workflow, and bin/test-go-build-platforms all extract
# entries with a literal `grep '^ - ' | sed 's/^ - //'` pipeline, so the exact
# indentation/prefix of the list items below must keep matching that pattern.
# Confirm those consumers before reformatting this list.
#
# Each entry is "<GOOS>-<GOARCH>" exactly as understood by the Go toolchain.
platforms:
- darwin-amd64
- darwin-arm64
- freebsd-amd64
- linux-amd64
- linux-arm64
- linux-riscv64
- openbsd-amd64
- windows-amd64
- windows-arm64

View File

@ -32,9 +32,9 @@ jobs:
uses: actions/checkout@v5
- name: Setup Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: 1.25.x
go-version-file: 'go.mod'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@ -23,7 +23,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v5
- uses: hadolint/hadolint-action@v3.1.0
- uses: hadolint/hadolint-action@v3.3.0
with:
dockerfile: Dockerfile
failure-threshold: warning

View File

@ -46,17 +46,16 @@ jobs:
output: fixtures
# 2. Build the kubo-gateway
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.25.x
- uses: protocol/cache-go-action@v1
with:
name: ${{ github.job }}
- name: Checkout kubo-gateway
uses: actions/checkout@v5
with:
path: kubo-gateway
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version-file: 'kubo-gateway/go.mod'
cache: true
cache-dependency-path: kubo-gateway/go.sum
- name: Build kubo-gateway
run: make build
working-directory: kubo-gateway
@ -133,17 +132,16 @@ jobs:
output: fixtures
# 2. Build the kubo-gateway
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.25.x
- uses: protocol/cache-go-action@v1
with:
name: ${{ github.job }}
- name: Checkout kubo-gateway
uses: actions/checkout@v5
with:
path: kubo-gateway
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version-file: 'kubo-gateway/go.mod'
cache: true
cache-dependency-path: kubo-gateway/go.sum
- name: Build kubo-gateway
run: make build
working-directory: kubo-gateway

View File

@ -21,20 +21,38 @@ jobs:
env:
TEST_DOCKER: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: 1.25.x
- uses: actions/checkout@v5
- run: make cmd/ipfs-try-build
env:
TEST_FUSE: 1
- run: make cmd/ipfs-try-build
env:
TEST_FUSE: 0
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
cache: true
cache-dependency-path: go.sum
- name: Build all platforms
run: |
# Read platforms from build-platforms.yml and build each one
echo "Building kubo for all platforms..."
# Read and build each platform
grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' | while read -r platform; do
if [ -z "$platform" ]; then
continue
fi
echo "::group::Building $platform"
GOOS=$(echo "$platform" | cut -d- -f1)
GOARCH=$(echo "$platform" | cut -d- -f2)
echo "Building $platform"
echo " GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs"
GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs
echo "::endgroup::"
done
echo "All platforms built successfully"

View File

@ -25,9 +25,9 @@ jobs:
- uses: actions/checkout@v5
with:
submodules: recursive
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version: "1.25.x"
go-version-file: 'go.mod'
- name: Check that go.mod is tidy
uses: protocol/multiple-go-modules@v1.4
with:

View File

@ -22,15 +22,14 @@ jobs:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: 1.25.x
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- run: make -O test_go_lint

View File

@ -22,19 +22,18 @@ jobs:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.25.x
- name: Check out Kubo
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
@ -45,7 +44,7 @@ jobs:
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
if: failure() || success()
with:
name: unittests

View File

@ -9,9 +9,6 @@ on:
branches:
- 'master'
env:
GO_VERSION: 1.25.x
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
cancel-in-progress: true
@ -29,17 +26,16 @@ jobs:
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- run: make build
- uses: actions/upload-artifact@v4
with:
@ -53,7 +49,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/setup-node@v4
- uses: actions/setup-node@v5
with:
node-version: lts/*
- uses: actions/download-artifact@v5
@ -82,14 +78,13 @@ jobs:
LIBP2P_TCP_REUSEPORT: false
LIBP2P_ALLOW_WEAK_RSA_KEYS: 1
E2E_IPFSD_TYPE: go
TRAVIS: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- uses: actions/setup-node@v4
- uses: actions/setup-node@v5
with:
node-version: 20.x
- uses: actions/download-artifact@v5

View File

@ -4,10 +4,10 @@ on:
workflow_dispatch:
pull_request:
paths-ignore:
- '**/*.md'
- "**/*.md"
push:
branches:
- 'master'
- "master"
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
@ -22,14 +22,14 @@ jobs:
run:
shell: bash
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.25.x
- name: Checkout Kubo
uses: actions/checkout@v5
with:
path: kubo
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version-file: 'kubo/go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
- uses: actions/cache@v4
@ -55,7 +55,7 @@ jobs:
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
if: failure() || success()
with:
name: sharness

View File

@ -22,11 +22,11 @@ jobs:
- uses: ipfs/start-ipfs-daemon-action@v1
with:
args: --init --init-profile=flatfs,server --enable-gc=false
- uses: actions/setup-node@v4
- uses: actions/setup-node@v5
with:
node-version: 14
- name: Sync the latest 5 github releases
uses: actions/github-script@v7
uses: actions/github-script@v8
with:
script: |
const fs = require('fs').promises

View File

@ -1,5 +1,6 @@
# Kubo Changelogs
- [v0.38](docs/changelogs/v0.38.md)
- [v0.37](docs/changelogs/v0.37.md)
- [v0.36](docs/changelogs/v0.36.md)
- [v0.35](docs/changelogs/v0.35.md)

View File

@ -1,6 +1,6 @@
<h1 align="center">
<br>
<a href="https://docs.ipfs.tech/how-to/command-line-quick-start/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
<a href="https://github.com/ipfs/kubo/blob/master/docs/logo/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
<br>
Kubo: IPFS Implementation in GO
<br>

View File

@ -107,8 +107,8 @@ uninstall:
.PHONY: uninstall
supported:
@echo "Currently supported platforms:"
@for p in ${SUPPORTED_PLATFORMS}; do echo $$p; done
@echo "Currently supported platforms (from .github/build-platforms.yml):"
@grep '^ - ' .github/build-platforms.yml | sed 's/^ - //' || (echo "Error: .github/build-platforms.yml not found"; exit 1)
.PHONY: supported
help:
@ -138,7 +138,8 @@ help:
@echo ' test_short - Run short go tests and short sharness tests'
@echo ' test_go_short - Run short go tests'
@echo ' test_go_test - Run all go tests'
@echo ' test_go_expensive - Run all go tests and compile on all platforms'
@echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
@echo ' test_go_expensive - Run all go tests and build all platforms'
@echo ' test_go_race - Run go tests with the race detector enabled'
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
@echo ' test_sharness - Run sharness tests'

View File

@ -1,49 +0,0 @@
# Notes:
# - Minimal appveyor.yml file is an empty file. All sections are optional.
# - Indent each level of configuration with 2 spaces. Do not use tabs!
# - All section names are case-sensitive.
# - Section names should be unique on each level.
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ipfs\go-ipfs
environment:
GOPATH: c:\gopath
TEST_VERBOSE: 1
#TEST_NO_FUSE: 1
#TEST_SUITE: test_sharness
#GOFLAGS: -tags nofuse
global:
BASH: C:\cygwin\bin\bash
matrix:
- GOARCH: amd64
GOVERSION: 1.5.1
GOROOT: c:\go
DOWNLOADPLATFORM: "x64"
install:
# Enable make
#- SET PATH=c:\MinGW\bin;%PATH%
#- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
- go version
- go env
# Cygwin build script
#
# NOTES:
#
# The stdin/stdout file descriptor appears not to be valid for the Appveyor
# build which causes failures as certain functions attempt to redirect
# default file handles. Ensure a dummy file descriptor is opened with 'exec'.
#
build_script:
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; make nofuse"'
test_script:
- '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; export GOFLAGS=''-tags nofuse''; export TEST_NO_FUSE=1; export TEST_VERBOSE=1; export TEST_EXPENSIVE=1; export TEST_SUITE=test_sharness; make $TEST_SUITE"'
#build:
# parallel: true

24
bin/test-go-build-platforms Executable file
View File

@ -0,0 +1,24 @@
#!/bin/bash
# test-go-build-platforms: cross-compile kubo for every platform listed in
# .github/build-platforms.yml (entries are "<GOOS>-<GOARCH>") to verify the
# tree builds everywhere. Binaries are discarded (-o /dev/null).
#
# Exits non-zero if the platforms file is missing, parses to an empty list,
# or any platform fails to build.
set -euo pipefail

PLATFORMS_FILE=.github/build-platforms.yml

if [ ! -f "$PLATFORMS_FILE" ]; then
  echo "Error: $PLATFORMS_FILE not found"
  exit 1
fi

# Extract the "- <os>-<arch>" list entries up front. The previous version
# piped grep straight into `while read`, so an empty match silently produced
# zero iterations and a bogus "All platforms built successfully".
platforms=$(grep '^ - ' "$PLATFORMS_FILE" | sed 's/^ - //' || true)
if [ -z "$platforms" ]; then
  echo "Error: no platforms found in $PLATFORMS_FILE"
  exit 1
fi

echo "Building kubo for all platforms in $PLATFORMS_FILE..."
while read -r platform; do
  if [ -z "$platform" ]; then
    continue
  fi
  # "<GOOS>-<GOARCH>": split on the first dash.
  GOOS=${platform%%-*}
  GOARCH=${platform#*-}
  echo "Building $platform..."
  echo "  GOOS=$GOOS GOARCH=$GOARCH go build -o /dev/null ./cmd/ipfs"
  GOOS="$GOOS" GOARCH="$GOARCH" go build -o /dev/null ./cmd/ipfs
done <<< "$platforms"
echo "All platforms built successfully"

View File

@ -47,7 +47,7 @@ func (np NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdent
c.Experimental.FilestoreEnabled = true
// only provide things we pin. Allows to test
// provide operations.
c.Reprovider.Strategy = config.NewOptionalString("roots")
c.Provide.Strategy = config.NewOptionalString("roots")
n.WriteConfig(c)
n.StartDaemon("--enable-pubsub-experiment", "--offline="+strconv.FormatBool(!online))

View File

@ -52,8 +52,12 @@ func (api *PinAPI) Add(ctx context.Context, p path.Path, opts ...caopts.PinAddOp
return err
}
return api.core().Request("pin/add", p.String()).
Option("recursive", options.Recursive).Exec(ctx, nil)
req := api.core().Request("pin/add", p.String()).
Option("recursive", options.Recursive)
if options.Name != "" {
req = req.Option("name", options.Name)
}
return req.Exec(ctx, nil)
}
type pinLsObject struct {
@ -72,6 +76,7 @@ func (api *PinAPI) Ls(ctx context.Context, pins chan<- iface.Pin, opts ...caopts
res, err := api.core().Request("pin/ls").
Option("type", options.Type).
Option("names", options.Detailed).
Option("stream", true).
Send(ctx)
if err != nil {
@ -80,8 +85,8 @@ func (api *PinAPI) Ls(ctx context.Context, pins chan<- iface.Pin, opts ...caopts
defer res.Output.Close()
dec := json.NewDecoder(res.Output)
var out pinLsObject
for {
var out pinLsObject
err := dec.Decode(&out)
if err != nil {
if err != io.EOF {

View File

@ -2,7 +2,6 @@ include mk/header.mk
IPFS_BIN_$(d) := $(call go-curr-pkg-tgt)
TGT_BIN += $(IPFS_BIN_$(d))
TEST_GO_BUILD += $(d)-try-build
CLEAN += $(IPFS_BIN_$(d))
PATH := $(realpath $(d)):$(PATH)
@ -15,23 +14,12 @@ PATH := $(realpath $(d)):$(PATH)
$(d)_flags =-ldflags="-X "github.com/ipfs/kubo".CurrentCommit=$(git-hash)"
$(d)-try-build $(IPFS_BIN_$(d)): GOFLAGS += $(cmd/ipfs_flags)
$(IPFS_BIN_$(d)): GOFLAGS += $(cmd/ipfs_flags)
# uses second expansion to collect all $(DEPS_GO)
$(IPFS_BIN_$(d)): $(d) $$(DEPS_GO) ALWAYS #| $(DEPS_OO_$(d))
$(go-build-relative)
TRY_BUILD_$(d)=$(addprefix $(d)-try-build-,$(SUPPORTED_PLATFORMS))
$(d)-try-build: $(TRY_BUILD_$(d))
.PHONY: $(d)-try-build
$(TRY_BUILD_$(d)): PLATFORM = $(subst -, ,$(patsubst $<-try-build-%,%,$@))
$(TRY_BUILD_$(d)): GOOS = $(word 1,$(PLATFORM))
$(TRY_BUILD_$(d)): GOARCH = $(word 2,$(PLATFORM))
$(TRY_BUILD_$(d)): $(d) $$(DEPS_GO) ALWAYS
GOOS=$(GOOS) GOARCH=$(GOARCH) $(go-try-build)
.PHONY: $(TRY_BUILD_$(d))
$(d)-install: GOFLAGS += $(cmd/ipfs_flags)
$(d)-install: $(d) $$(DEPS_GO) ALWAYS
$(GOCC) install $(go-flags-with-tags) ./cmd/ipfs

View File

@ -83,10 +83,12 @@ func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string,
fi, err := f.Stat()
if err != nil {
f.Close()
return err
}
ipfsPath, err := ufs.Add(ctx, files.NewReaderStatFile(f, fi), options.Unixfs.Pin(pin, ""))
f.Close()
if err != nil {
return err
}

View File

@ -43,6 +43,9 @@ import (
manet "github.com/multiformats/go-multiaddr/net"
prometheus "github.com/prometheus/client_golang/prometheus"
promauto "github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/otel"
promexporter "go.opentelemetry.io/otel/exporters/prometheus"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)
const (
@ -211,6 +214,21 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
}
// Set up OpenTelemetry meter provider to enable metrics from external libraries
// like go-libp2p-kad-dht. Without this, metrics registered via otel.Meter()
// (such as total_provide_count from sweep provider) won't be exposed at the
// /debug/metrics/prometheus endpoint.
if exporter, err := promexporter.New(
promexporter.WithRegisterer(prometheus.DefaultRegisterer),
); err != nil {
log.Errorf("Creating prometheus exporter for OpenTelemetry failed: %s (some metrics will be missing from /debug/metrics/prometheus)\n", err.Error())
} else {
meterProvider := sdkmetric.NewMeterProvider(
sdkmetric.WithReader(exporter),
)
otel.SetMeterProvider(meterProvider)
}
// let the user know we're going.
fmt.Printf("Initializing daemon...\n")
@ -284,6 +302,15 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
default:
return err
case fsrepo.ErrNeedMigration:
migrationDone := make(chan struct{})
go func() {
select {
case <-req.Context.Done():
os.Exit(1)
case <-migrationDone:
}
}()
domigrate, found := req.Options[migrateKwd].(bool)
// Get current repo version for more informative message
@ -299,6 +326,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
if !found {
domigrate = YesNoPrompt("Run migrations now? [y/N]")
}
close(migrationDone)
if !domigrate {
fmt.Printf("Not running migrations on repository at %s. Re-run daemon with --migrate or see 'ipfs repo migrate --help'\n", cctx.ConfigRoot)
@ -476,25 +504,33 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
// This should never happen, but better safe than sorry
log.Fatal("Private network does not work with Routing.Type=auto. Update your config to Routing.Type=dht (or none, and do manual peering)")
}
if cfg.Provider.Strategy.WithDefault("") != "" && cfg.Reprovider.Strategy.IsDefault() {
log.Fatal("Invalid config. Remove unused Provider.Strategy and set Reprovider.Strategy instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy")
// Check for deprecated Provider/Reprovider configuration after migration
// This should never happen for regular users, but is useful error for people who have Docker orchestration
// that blindly sets config keys (overriding automatic Kubo migration).
//nolint:staticcheck // intentionally checking deprecated fields
if cfg.Provider.Enabled != config.Default || !cfg.Provider.Strategy.IsDefault() || !cfg.Provider.WorkerCount.IsDefault() {
log.Fatal("Deprecated configuration detected. Manually migrate 'Provider' fields to 'Provide' and remove 'Provider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
}
// Check for deprecated "flat" strategy
if cfg.Reprovider.Strategy.WithDefault("") == "flat" {
log.Error("Reprovider.Strategy='flat' is deprecated and will be removed in the next release. Please update your config to use 'all' instead.")
//nolint:staticcheck // intentionally checking deprecated fields
if !cfg.Reprovider.Interval.IsDefault() || !cfg.Reprovider.Strategy.IsDefault() {
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
}
// Check for deprecated "flat" strategy (should have been migrated to "all")
if cfg.Provide.Strategy.WithDefault("") == "flat" {
log.Fatal("Provide.Strategy='flat' is no longer supported. Use 'all' instead. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy")
}
if cfg.Experimental.StrategicProviding {
log.Error("Experimental.StrategicProviding was removed. Remove it from your config and set Provider.Enabled=false to remove this message. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
cfg.Experimental.StrategicProviding = false
cfg.Provider.Enabled = config.False
log.Fatal("Experimental.StrategicProviding was removed. Remove it from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#strategic-providing")
}
// Check for invalid MaxWorkers=0 with SweepEnabled
if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) &&
cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers) == 0 {
log.Fatal("Invalid configuration: Provide.DHT.MaxWorkers cannot be 0 when Provide.DHT.SweepEnabled=true. Set Provide.DHT.MaxWorkers to a positive value (e.g., 16) to control resource usage. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers")
}
if routingOption == routingOptionDelegatedKwd {
// Delegated routing is read-only mode - content providing must be disabled
if cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
log.Fatal("Routing.Type=delegated does not support content providing. Set Provider.Enabled=false in your config.")
}
if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0 {
log.Fatal("Routing.Type=delegated does not support content providing. Set Reprovider.Interval='0' in your config.")
if cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
log.Fatal("Routing.Type=delegated does not support content providing. Set Provide.Enabled=false in your config.")
}
}
@ -649,7 +685,7 @@ take effect.
if !offline {
// Warn users when provide systems are disabled
if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
fmt.Print(`
Provide and Reprovide systems are disabled due to 'Provide.Enabled=false'
@ -657,12 +693,12 @@ take effect.
If this is not intentional, call 'ipfs config profile apply announce-on' or set Provide.Enabled=true'
`)
} else if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
} else if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
fmt.Print(`
Provide and Reprovide systems are disabled due to 'Reprovider.Interval=0'
Local CIDs will not be announced to Amino DHT, making them impossible to retrieve without manual peering
If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Reprovider.Interval=22h'
Providing to the DHT is disabled due to 'Provide.DHT.Interval=0'
Local CIDs will not be provided to Amino DHT, making them impossible to retrieve without manual peering
If this is not intentional, call 'ipfs config profile apply announce-on', or set 'Provide.DHT.Interval=22h'
`)
}

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package kubo

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package kubo

View File

@ -1,5 +1,4 @@
//go:build testrunmain
// +build testrunmain
package main_test

View File

@ -1,5 +1,4 @@
//go:build !wasm
// +build !wasm
package util
@ -64,13 +63,7 @@ func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
switch count {
case 1:
fmt.Println() // Prevent un-terminated ^C character in terminal
ih.wg.Add(1)
go func() {
defer ih.wg.Done()
cancelFunc()
}()
cancelFunc()
default:
fmt.Println("Received another interrupt before graceful shutdown, terminating...")
os.Exit(-1)

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package util

View File

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package util

View File

@ -1,5 +1,4 @@
//go:build !windows && !plan9
// +build !windows,!plan9
package util

View File

@ -1,5 +1,4 @@
//go:build darwin || linux || netbsd || openbsd
// +build darwin linux netbsd openbsd
package util

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package util

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package main

View File

@ -1,5 +1,4 @@
//go:build !plan9
// +build !plan9
package main
@ -13,6 +12,7 @@ import (
"syscall"
commands "github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
core "github.com/ipfs/kubo/core"
coreapi "github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
@ -25,10 +25,18 @@ import (
var (
http = flag.Bool("http", false, "expose IPFS HTTP API")
repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
repoPath *string
watchPath = flag.String("path", ".", "the path to watch")
)
func init() {
ipfsPath, err := config.PathRoot()
if err != nil {
ipfsPath = os.Getenv(config.EnvDir)
}
repoPath = flag.String("repo", ipfsPath, "repo path to use")
}
func main() {
flag.Parse()

View File

@ -35,8 +35,9 @@ type Config struct {
Migration Migration
AutoConf AutoConf
Provider Provider
Reprovider Reprovider
Provide Provide // Merged Provider and Reprovider configuration
Provider Provider // Deprecated: use Provide. Will be removed in a future release.
Reprovider Reprovider // Deprecated: use Provide. Will be removed in a future release.
HTTPRetrieval HTTPRetrieval
Experimental Experiments
Plugins Plugins

View File

@ -134,14 +134,24 @@ func TestCheckKey(t *testing.T) {
t.Fatal("Foo.Bar isn't a valid key in the config")
}
err = CheckKey("Reprovider.Strategy")
err = CheckKey("Provide.Strategy")
if err != nil {
t.Fatalf("%s: %s", err, "Reprovider.Strategy is a valid key in the config")
t.Fatalf("%s: %s", err, "Provide.Strategy is a valid key in the config")
}
err = CheckKey("Provider.Foo")
err = CheckKey("Provide.DHT.MaxWorkers")
if err != nil {
t.Fatalf("%s: %s", err, "Provide.DHT.MaxWorkers is a valid key in the config")
}
err = CheckKey("Provide.DHT.Interval")
if err != nil {
t.Fatalf("%s: %s", err, "Provide.DHT.Interval is a valid key in the config")
}
err = CheckKey("Provide.Foo")
if err == nil {
t.Fatal("Provider.Foo isn't a valid key in the config")
t.Fatal("Provide.Foo isn't a valid key in the config")
}
err = CheckKey("Gateway.PublicGateways.Foo.Paths")

View File

@ -9,6 +9,7 @@ const (
DefaultDeserializedResponses = true
DefaultDisableHTMLErrors = false
DefaultExposeRoutingAPI = false
DefaultDiagnosticServiceURL = "https://check.ipfs.network"
// Gateway limit defaults from boxo
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
@ -98,4 +99,10 @@ type Gateway struct {
// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
// A value of 0 disables the limit.
MaxConcurrentRequests *OptionalInteger `json:",omitempty"`
// DiagnosticServiceURL is the URL for a service to diagnose CID retrievability issues.
// When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID"
// button will be shown that links to this service with the CID appended as ?cid=<CID-to-diagnose>.
// Set to empty string to disable the button.
DiagnosticServiceURL *OptionalString `json:",omitempty"`
}

View File

@ -1,8 +1,14 @@
package config
import (
"fmt"
"strconv"
"strings"
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/verifcid"
mh "github.com/multiformats/go-multihash"
)
const (
@ -43,3 +49,132 @@ type Import struct {
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
}
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
// See: https://specs.ipfs.tech/unixfs/#hamt-structure-and-parameters
//
// Every field is optional: values left at their defaults are skipped, and the
// first explicitly-set field that fails validation produces the returned error.
// A nil error means all set fields are acceptable.
func ValidateImportConfig(cfg *Import) error {
	// CID version: only v0 and v1 exist.
	if !cfg.CidVersion.IsDefault() {
		if v := cfg.CidVersion.WithDefault(DefaultCidVersion); v != 0 && v != 1 {
			return fmt.Errorf("Import.CidVersion must be 0 or 1, got %d", v)
		}
	}

	// File nodes need at least one link.
	if !cfg.UnixFSFileMaxLinks.IsDefault() {
		if v := cfg.UnixFSFileMaxLinks.WithDefault(DefaultUnixFSFileMaxLinks); v <= 0 {
			return fmt.Errorf("Import.UnixFSFileMaxLinks must be positive, got %d", v)
		}
	}

	// Directory links: zero is tolerated here (unlike file links); only
	// negative values are rejected.
	if !cfg.UnixFSDirectoryMaxLinks.IsDefault() {
		if v := cfg.UnixFSDirectoryMaxLinks.WithDefault(DefaultUnixFSDirectoryMaxLinks); v < 0 {
			return fmt.Errorf("Import.UnixFSDirectoryMaxLinks must be non-negative, got %d", v)
		}
	}

	// HAMT fanout must be one of 8, 16, 32, 64, 128, 256, 512, 1024: a power
	// of two, at least 8 (which also makes it a multiple of 8), capped at 1024.
	if !cfg.UnixFSHAMTDirectoryMaxFanout.IsDefault() {
		if v := cfg.UnixFSHAMTDirectoryMaxFanout.WithDefault(DefaultUnixFSHAMTDirectoryMaxFanout); v < 8 || !isPowerOfTwo(v) || v > 1024 {
			return fmt.Errorf("Import.UnixFSHAMTDirectoryMaxFanout must be a positive power of 2, multiple of 8, and not exceed 1024 (got %d)", v)
		}
	}

	// Import batching limits must both be strictly positive.
	if !cfg.BatchMaxNodes.IsDefault() {
		if v := cfg.BatchMaxNodes.WithDefault(DefaultBatchMaxNodes); v <= 0 {
			return fmt.Errorf("Import.BatchMaxNodes must be positive, got %d", v)
		}
	}
	if !cfg.BatchMaxSize.IsDefault() {
		if v := cfg.BatchMaxSize.WithDefault(DefaultBatchMaxSize); v <= 0 {
			return fmt.Errorf("Import.BatchMaxSize must be positive, got %d", v)
		}
	}

	// Chunker spec must match one of the recognized formats.
	if !cfg.UnixFSChunker.IsDefault() {
		if spec := cfg.UnixFSChunker.WithDefault(DefaultUnixFSChunker); !isValidChunker(spec) {
			return fmt.Errorf("Import.UnixFSChunker invalid format: %q (expected \"size-<bytes>\", \"rabin-<min>-<avg>-<max>\", or \"buzhash\")", spec)
		}
	}

	// Hash function must be a known multihash name that verifcid permits.
	if !cfg.HashFunction.IsDefault() {
		name := cfg.HashFunction.WithDefault(DefaultHashFunction)
		code, known := mh.Names[strings.ToLower(name)]
		if !known {
			return fmt.Errorf("Import.HashFunction unrecognized: %q", name)
		}
		if !verifcid.DefaultAllowlist.IsAllowed(code) {
			return fmt.Errorf("Import.HashFunction %q is not allowed for use in IPFS", name)
		}
	}

	return nil
}
// isPowerOfTwo checks if a number is a power of 2
func isPowerOfTwo(n int64) bool {
return n > 0 && (n&(n-1)) == 0
}
// isValidChunker validates chunker format
func isValidChunker(chunker string) bool {
if chunker == "buzhash" {
return true
}
// Check for size-<bytes> format
if strings.HasPrefix(chunker, "size-") {
sizeStr := strings.TrimPrefix(chunker, "size-")
if sizeStr == "" {
return false
}
// Check if it's a valid positive integer (no negative sign allowed)
if sizeStr[0] == '-' {
return false
}
size, err := strconv.Atoi(sizeStr)
// Size must be positive (not zero)
return err == nil && size > 0
}
// Check for rabin-<min>-<avg>-<max> format
if strings.HasPrefix(chunker, "rabin-") {
parts := strings.Split(chunker, "-")
if len(parts) != 4 {
return false
}
// Parse and validate min, avg, max values
values := make([]int, 3)
for i := 0; i < 3; i++ {
val, err := strconv.Atoi(parts[i+1])
if err != nil {
return false
}
values[i] = val
}
// Validate ordering: min <= avg <= max
min, avg, max := values[0], values[1], values[2]
return min <= avg && avg <= max
}
return false
}

408
config/import_test.go Normal file
View File

@ -0,0 +1,408 @@
package config
import (
"strings"
"testing"
mh "github.com/multiformats/go-multihash"
)
// TestValidateImportConfig_HAMTFanout checks that UnixFSHAMTDirectoryMaxFanout
// accepts exactly the powers of two from 8 through 1024 and rejects everything
// else with a descriptive error.
func TestValidateImportConfig_HAMTFanout(t *testing.T) {
	const errPow2 = "must be a positive power of 2, multiple of 8, and not exceed 1024"

	cases := []struct {
		name    string
		fanout  int64
		wantErr bool
		errMsg  string
	}{
		// Valid values - powers of 2, multiples of 8, and <= 1024
		{name: "valid 8", fanout: 8},
		{name: "valid 16", fanout: 16},
		{name: "valid 32", fanout: 32},
		{name: "valid 64", fanout: 64},
		{name: "valid 128", fanout: 128},
		{name: "valid 256", fanout: 256},
		{name: "valid 512", fanout: 512},
		{name: "valid 1024", fanout: 1024},
		// Invalid values - not powers of 2
		{name: "invalid 7", fanout: 7, wantErr: true, errMsg: errPow2},
		{name: "invalid 15", fanout: 15, wantErr: true, errMsg: errPow2},
		{name: "invalid 100", fanout: 100, wantErr: true, errMsg: errPow2},
		{name: "invalid 257", fanout: 257, wantErr: true, errMsg: errPow2},
		{name: "invalid 1000", fanout: 1000, wantErr: true, errMsg: errPow2},
		// Invalid values - powers of 2 but not multiples of 8
		{name: "invalid 1", fanout: 1, wantErr: true, errMsg: errPow2},
		{name: "invalid 2", fanout: 2, wantErr: true, errMsg: errPow2},
		{name: "invalid 4", fanout: 4, wantErr: true, errMsg: errPow2},
		// Invalid values - exceeds 1024
		{name: "invalid 2048", fanout: 2048, wantErr: true, errMsg: errPow2},
		{name: "invalid 4096", fanout: 4096, wantErr: true, errMsg: errPow2},
		// Invalid values - negative or zero
		{name: "invalid 0", fanout: 0, wantErr: true, errMsg: errPow2},
		{name: "invalid -8", fanout: -8, wantErr: true, errMsg: errPow2},
		{name: "invalid -256", fanout: -256, wantErr: true, errMsg: errPow2},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cfg := &Import{
				UnixFSHAMTDirectoryMaxFanout: *NewOptionalInteger(tc.fanout),
			}
			err := ValidateImportConfig(cfg)
			switch {
			case tc.wantErr && err == nil:
				t.Errorf("ValidateImportConfig() expected error for fanout=%d, got nil", tc.fanout)
			case tc.wantErr && tc.errMsg != "" && !strings.Contains(err.Error(), tc.errMsg):
				t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tc.errMsg)
			case !tc.wantErr && err != nil:
				t.Errorf("ValidateImportConfig() unexpected error for fanout=%d: %v", tc.fanout, err)
			}
		})
	}
}
func TestValidateImportConfig_CidVersion(t *testing.T) {
tests := []struct {
name string
cidVer int64
wantErr bool
errMsg string
}{
{name: "valid 0", cidVer: 0, wantErr: false},
{name: "valid 1", cidVer: 1, wantErr: false},
{name: "invalid 2", cidVer: 2, wantErr: true, errMsg: "must be 0 or 1"},
{name: "invalid -1", cidVer: -1, wantErr: true, errMsg: "must be 0 or 1"},
{name: "invalid 100", cidVer: 100, wantErr: true, errMsg: "must be 0 or 1"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
CidVersion: *NewOptionalInteger(tt.cidVer),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("ValidateImportConfig() expected error for cidVer=%d, got nil", tt.cidVer)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("ValidateImportConfig() unexpected error for cidVer=%d: %v", tt.cidVer, err)
}
}
})
}
}
func TestValidateImportConfig_UnixFSFileMaxLinks(t *testing.T) {
tests := []struct {
name string
maxLinks int64
wantErr bool
errMsg string
}{
{name: "valid 1", maxLinks: 1, wantErr: false},
{name: "valid 174", maxLinks: 174, wantErr: false},
{name: "valid 1000", maxLinks: 1000, wantErr: false},
{name: "invalid 0", maxLinks: 0, wantErr: true, errMsg: "must be positive"},
{name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be positive"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSFileMaxLinks: *NewOptionalInteger(tt.maxLinks),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err)
}
}
})
}
}
func TestValidateImportConfig_UnixFSDirectoryMaxLinks(t *testing.T) {
tests := []struct {
name string
maxLinks int64
wantErr bool
errMsg string
}{
{name: "valid 0", maxLinks: 0, wantErr: false}, // 0 means no limit
{name: "valid 1", maxLinks: 1, wantErr: false},
{name: "valid 1000", maxLinks: 1000, wantErr: false},
{name: "invalid -1", maxLinks: -1, wantErr: true, errMsg: "must be non-negative"},
{name: "invalid -100", maxLinks: -100, wantErr: true, errMsg: "must be non-negative"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSDirectoryMaxLinks: *NewOptionalInteger(tt.maxLinks),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("ValidateImportConfig() expected error for maxLinks=%d, got nil", tt.maxLinks)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("ValidateImportConfig() unexpected error for maxLinks=%d: %v", tt.maxLinks, err)
}
}
})
}
}
// TestValidateImportConfig_BatchMax verifies that BatchMaxNodes and
// BatchMaxSize must each be strictly positive when set. The sentinel -999
// marks a field that should be left unset for that case.
func TestValidateImportConfig_BatchMax(t *testing.T) {
	tests := []struct {
		name     string
		maxNodes int64
		maxSize  int64
		wantErr  bool
		errMsg   string
	}{
		{name: "valid nodes 1", maxNodes: 1, maxSize: -999, wantErr: false},
		{name: "valid nodes 128", maxNodes: 128, maxSize: -999, wantErr: false},
		{name: "valid size 1", maxNodes: -999, maxSize: 1, wantErr: false},
		{name: "valid size 20MB", maxNodes: -999, maxSize: 20 << 20, wantErr: false},
		{name: "invalid nodes 0", maxNodes: 0, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"},
		{name: "invalid nodes -1", maxNodes: -1, maxSize: -999, wantErr: true, errMsg: "BatchMaxNodes must be positive"},
		{name: "invalid size 0", maxNodes: -999, maxSize: 0, wantErr: true, errMsg: "BatchMaxSize must be positive"},
		{name: "invalid size -1", maxNodes: -999, maxSize: -1, wantErr: true, errMsg: "BatchMaxSize must be positive"},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			var cfg Import
			if tc.maxNodes != -999 {
				cfg.BatchMaxNodes = *NewOptionalInteger(tc.maxNodes)
			}
			if tc.maxSize != -999 {
				cfg.BatchMaxSize = *NewOptionalInteger(tc.maxSize)
			}
			err := ValidateImportConfig(&cfg)
			switch {
			case !tc.wantErr:
				if err != nil {
					t.Errorf("ValidateImportConfig() unexpected error: %v", err)
				}
			case err == nil:
				t.Errorf("ValidateImportConfig() expected error, got nil")
			case tc.errMsg != "" && !strings.Contains(err.Error(), tc.errMsg):
				t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tc.errMsg)
			}
		})
	}
}
func TestValidateImportConfig_UnixFSChunker(t *testing.T) {
tests := []struct {
name string
chunker string
wantErr bool
errMsg string
}{
{name: "valid size-262144", chunker: "size-262144", wantErr: false},
{name: "valid size-1", chunker: "size-1", wantErr: false},
{name: "valid size-1048576", chunker: "size-1048576", wantErr: false},
{name: "valid rabin", chunker: "rabin-128-256-512", wantErr: false},
{name: "valid rabin min", chunker: "rabin-16-32-64", wantErr: false},
{name: "valid buzhash", chunker: "buzhash", wantErr: false},
{name: "invalid size-", chunker: "size-", wantErr: true, errMsg: "invalid format"},
{name: "invalid size-abc", chunker: "size-abc", wantErr: true, errMsg: "invalid format"},
{name: "invalid rabin-", chunker: "rabin-", wantErr: true, errMsg: "invalid format"},
{name: "invalid rabin-128", chunker: "rabin-128", wantErr: true, errMsg: "invalid format"},
{name: "invalid rabin-128-256", chunker: "rabin-128-256", wantErr: true, errMsg: "invalid format"},
{name: "invalid rabin-a-b-c", chunker: "rabin-a-b-c", wantErr: true, errMsg: "invalid format"},
{name: "invalid unknown", chunker: "unknown", wantErr: true, errMsg: "invalid format"},
{name: "invalid empty", chunker: "", wantErr: true, errMsg: "invalid format"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSChunker: *NewOptionalString(tt.chunker),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("ValidateImportConfig() expected error for chunker=%s, got nil", tt.chunker)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("ValidateImportConfig() unexpected error for chunker=%s: %v", tt.chunker, err)
}
}
})
}
}
// TestValidateImportConfig_HashFunction verifies that only recognized, allowed
// multihash names pass validation. Unknown names are "unrecognized"; names the
// multihash table knows but that are insecure (md5) are "not allowed".
func TestValidateImportConfig_HashFunction(t *testing.T) {
	tests := []struct {
		name     string
		hashFunc string
		wantErr  bool
		errMsg   string
	}{
		{name: "valid sha2-256", hashFunc: "sha2-256", wantErr: false},
		{name: "valid sha2-512", hashFunc: "sha2-512", wantErr: false},
		{name: "valid sha3-256", hashFunc: "sha3-256", wantErr: false},
		{name: "valid blake2b-256", hashFunc: "blake2b-256", wantErr: false},
		{name: "valid blake3", hashFunc: "blake3", wantErr: false},
		{name: "invalid unknown", hashFunc: "unknown-hash", wantErr: true, errMsg: "unrecognized"},
		{name: "invalid empty", hashFunc: "", wantErr: true, errMsg: "unrecognized"},
	}
	// Check for hashes that exist but are not allowed:
	// MD5 should exist in the multihash table but not be allowed here.
	// (Blank identifier: only existence matters, not the code value.)
	if _, ok := mh.Names["md5"]; ok {
		tests = append(tests, struct {
			name     string
			hashFunc string
			wantErr  bool
			errMsg   string
		}{name: "md5 not allowed", hashFunc: "md5", wantErr: true, errMsg: "not allowed"})
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := &Import{
				HashFunction: *NewOptionalString(tt.hashFunc),
			}
			err := ValidateImportConfig(cfg)
			if tt.wantErr {
				if err == nil {
					t.Errorf("ValidateImportConfig() expected error for hashFunc=%s, got nil", tt.hashFunc)
				} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
					t.Errorf("ValidateImportConfig() error = %v, want error containing %q", err, tt.errMsg)
				}
			} else {
				if err != nil {
					t.Errorf("ValidateImportConfig() unexpected error for hashFunc=%s: %v", tt.hashFunc, err)
				}
			}
		})
	}
}
func TestValidateImportConfig_DefaultValue(t *testing.T) {
// Test that default (unset) value doesn't trigger validation
cfg := &Import{}
err := ValidateImportConfig(cfg)
if err != nil {
t.Errorf("ValidateImportConfig() unexpected error for default config: %v", err)
}
}
// TestIsValidChunker exercises isValidChunker directly across well-formed and
// malformed chunker specs, including rabin ordering constraints (min < avg < max).
func TestIsValidChunker(t *testing.T) {
	cases := []struct {
		chunker string
		want    bool
	}{
		{"buzhash", true},
		{"size-262144", true},
		{"size-1", true},
		{"size-0", false}, // 0 is not valid - must be positive
		{"size-9999999", true},
		{"rabin-128-256-512", true},
		{"rabin-16-32-64", true},
		{"rabin-1-2-3", true},
		{"rabin-512-256-128", false}, // Invalid ordering: min > avg > max
		{"rabin-256-128-512", false}, // Invalid ordering: min > avg
		{"rabin-128-512-256", false}, // Invalid ordering: avg > max
		{"", false},
		{"size-", false},
		{"size-abc", false},
		{"size--1", false},
		{"rabin-", false},
		{"rabin-128", false},
		{"rabin-128-256", false},
		{"rabin-128-256-512-1024", false},
		{"rabin-a-b-c", false},
		{"unknown", false},
		{"buzzhash", false}, // typo
	}
	for _, tc := range cases {
		t.Run(tc.chunker, func(t *testing.T) {
			got := isValidChunker(tc.chunker)
			if got != tc.want {
				t.Errorf("isValidChunker(%q) = %v, want %v", tc.chunker, got, tc.want)
			}
		})
	}
}
// TestIsPowerOfTwo exercises isPowerOfTwo over positive, zero, and negative
// inputs; only exact powers of two (>= 1) report true.
func TestIsPowerOfTwo(t *testing.T) {
	cases := []struct {
		n    int64
		want bool
	}{
		{0, false},
		{1, true},
		{2, true},
		{3, false},
		{4, true},
		{5, false},
		{6, false},
		{7, false},
		{8, true},
		{16, true},
		{32, true},
		{64, true},
		{100, false},
		{128, true},
		{256, true},
		{512, true},
		{1024, true},
		{2048, true},
		{-1, false},
		{-8, false},
	}
	for _, tc := range cases {
		t.Run("", func(t *testing.T) {
			got := isPowerOfTwo(tc.n)
			if got != tc.want {
				t.Errorf("isPowerOfTwo(%d) = %v, want %v", tc.n, got, tc.want)
			}
		})
	}
}

View File

@ -60,10 +60,6 @@ func InitWithIdentity(identity Identity) (*Config, error) {
NoFetch: false,
HTTPHeaders: map[string][]string{},
},
Reprovider: Reprovider{
Interval: nil,
Strategy: nil,
},
Pinning: Pinning{
RemoteServices: map[string]RemotePinningService{},
},

View File

@ -1,11 +1,23 @@
package config
const (
	// DefaultMFSNoFlushLimit is the default limit for consecutive unflushed MFS operations
	// (see Internal.MFSNoFlushLimit below).
	DefaultMFSNoFlushLimit = 256
)

// Internal holds configuration for internal subcomponents; per the note below,
// these fields are expected to change and are all optional in serialized form.
type Internal struct {
	// All marked as omitempty since we are expecting to make changes to all subcomponents of Internal
	Bitswap *InternalBitswap `json:",omitempty"`
	// UnixFSShardingSizeThreshold remains only for backward compatibility;
	// moved to Import.UnixFSHAMTDirectorySizeThreshold.
	UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"` // moved to Import.UnixFSHAMTDirectorySizeThreshold
	Libp2pForceReachability     *OptionalString `json:",omitempty"`
	BackupBootstrapInterval     *OptionalDuration `json:",omitempty"`
	// MFSNoFlushLimit controls the maximum number of consecutive
	// MFS operations allowed with --flush=false before requiring a manual flush.
	// This prevents unbounded memory growth and ensures data consistency.
	// Set to 0 to disable limiting (old behavior, may cause high memory usage)
	// This is an EXPERIMENTAL feature and may change or be removed in future releases.
	// See https://github.com/ipfs/kubo/issues/10842
	MFSNoFlushLimit *OptionalInteger `json:",omitempty"`
}
type InternalBitswap struct {

View File

@ -275,7 +275,7 @@ fetching may be degraded.
},
},
"announce-off": {
Description: `Disables Provide and Reprovide systems (announcing to Amino DHT).
Description: `Disables Provide system (announcing to Amino DHT).
USE WITH CAUTION:
The main use case for this is setups with manual Peering.Peers config.
@ -284,16 +284,16 @@ fetching may be degraded.
one hosting it, and other peers are not already connected to it.
`,
Transform: func(c *Config) error {
c.Provider.Enabled = False
c.Reprovider.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
c.Provide.Enabled = False
c.Provide.DHT.Interval = NewOptionalDuration(0) // 0 disables periodic reprovide
return nil
},
},
"announce-on": {
Description: `Re-enables Provide and Reprovide systems (reverts announce-off profile).`,
Description: `Re-enables Provide system (reverts announce-off profile).`,
Transform: func(c *Config) error {
c.Provider.Enabled = True
c.Reprovider.Interval = NewOptionalDuration(DefaultReproviderInterval) // have to apply explicit default because nil would be ignored
c.Provide.Enabled = True
c.Provide.DHT.Interval = NewOptionalDuration(DefaultProvideDHTInterval) // have to apply explicit default because nil would be ignored
return nil
},
},

170
config/provide.go Normal file
View File

@ -0,0 +1,170 @@
package config
import (
"fmt"
"strings"
"time"
"github.com/libp2p/go-libp2p-kad-dht/amino"
)
const (
	// DefaultProvideEnabled is the default for Provide.Enabled.
	DefaultProvideEnabled = true
	// DefaultProvideStrategy is the default for Provide.Strategy.
	DefaultProvideStrategy = "all"

	// DHT provider defaults
	DefaultProvideDHTInterval                 = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
	DefaultProvideDHTMaxWorkers               = 16             // Unified default for both sweep and legacy providers
	DefaultProvideDHTSweepEnabled             = false
	DefaultProvideDHTDedicatedPeriodicWorkers = 2
	DefaultProvideDHTDedicatedBurstWorkers    = 1
	DefaultProvideDHTMaxProvideConnsPerWorker = 16
	DefaultProvideDHTKeystoreBatchSize        = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
	DefaultProvideDHTOfflineDelay             = 2 * time.Hour
)

// ProvideStrategy is a bitmask selecting which categories of local content are
// announced. Config strings combine tokens with "+" (e.g. "pinned+mfs"); see
// ParseProvideStrategy for the accepted tokens.
type ProvideStrategy int

const (
	// ProvideStrategyAll selects everything; it is absolute and does not
	// combine with the other bits (ParseProvideStrategy short-circuits on it).
	ProvideStrategyAll ProvideStrategy = 1 << iota
	// ProvideStrategyPinned corresponds to the "pinned" token.
	ProvideStrategyPinned
	// ProvideStrategyRoots corresponds to the "roots" token.
	ProvideStrategyRoots
	// ProvideStrategyMFS corresponds to the "mfs" token.
	ProvideStrategyMFS
)
// Provide configures both immediate CID announcements (provide operations) for new content
// and periodic re-announcements of existing CIDs (reprovide operations).
// This section combines the functionality previously split between Provider and Reprovider.
type Provide struct {
	// Enabled controls whether both provide and reprovide systems are enabled.
	// When disabled, the node will not announce any content to the routing system.
	// Default: DefaultProvideEnabled
	Enabled Flag `json:",omitempty"`

	// Strategy determines which CIDs are announced to the routing system.
	// Accepts "+"-separated tokens parsed by ParseProvideStrategy.
	// Default: DefaultProvideStrategy
	Strategy *OptionalString `json:",omitempty"`

	// DHT configures DHT-specific provide and reprovide settings.
	DHT ProvideDHT
}
// ProvideDHT configures DHT provider settings for both immediate announcements
// and periodic reprovides. Fields marked "(sweep mode only)" take effect only
// when SweepEnabled is true; see ValidateProvideConfig for the accepted ranges.
type ProvideDHT struct {
	// Interval sets the time between rounds of reproviding local content
	// to the routing system. Set to "0" to disable content reproviding.
	// Default: DefaultProvideDHTInterval
	Interval *OptionalDuration `json:",omitempty"`

	// MaxWorkers sets the maximum number of concurrent workers for provide operations.
	// When SweepEnabled is false: controls NEW CID announcements only.
	// When SweepEnabled is true: controls total worker pool for all operations.
	// Default: DefaultProvideDHTMaxWorkers
	MaxWorkers *OptionalInteger `json:",omitempty"`

	// SweepEnabled activates the sweeping reprovider system which spreads
	// reprovide operations over time. This will become the default in a future release.
	// Default: DefaultProvideDHTSweepEnabled
	SweepEnabled Flag `json:",omitempty"`

	// DedicatedPeriodicWorkers sets workers dedicated to periodic reprovides (sweep mode only).
	// Default: DefaultProvideDHTDedicatedPeriodicWorkers
	DedicatedPeriodicWorkers *OptionalInteger `json:",omitempty"`

	// DedicatedBurstWorkers sets workers dedicated to burst provides (sweep mode only).
	// Default: DefaultProvideDHTDedicatedBurstWorkers
	DedicatedBurstWorkers *OptionalInteger `json:",omitempty"`

	// MaxProvideConnsPerWorker sets concurrent connections per worker for sending provider records (sweep mode only).
	// Default: DefaultProvideDHTMaxProvideConnsPerWorker
	MaxProvideConnsPerWorker *OptionalInteger `json:",omitempty"`

	// KeystoreBatchSize sets the batch size for keystore operations during reprovide refresh (sweep mode only).
	// Default: DefaultProvideDHTKeystoreBatchSize
	KeystoreBatchSize *OptionalInteger `json:",omitempty"`

	// OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
	// Default: DefaultProvideDHTOfflineDelay
	OfflineDelay *OptionalDuration `json:",omitempty"`
}
// ParseProvideStrategy converts a "+"-separated strategy string into a
// ProvideStrategy bitmask. "all", the deprecated alias "flat", and the empty
// string are absolute: encountering any of them returns ProvideStrategyAll
// immediately. Unknown tokens are silently ignored, so an input made up solely
// of unknown tokens yields the zero value.
func ParseProvideStrategy(s string) ProvideStrategy {
	flagFor := map[string]ProvideStrategy{
		"pinned": ProvideStrategyPinned,
		"roots":  ProvideStrategyRoots,
		"mfs":    ProvideStrategyMFS,
	}
	var combined ProvideStrategy
	for _, token := range strings.Split(s, "+") {
		// special case, does not mix with others ("flat" is deprecated, maps to "all")
		if token == "all" || token == "flat" || token == "" {
			return ProvideStrategyAll
		}
		combined |= flagFor[token] // unknown tokens map to 0 and contribute nothing
	}
	return combined
}
// ValidateProvideConfig validates the Provide configuration according to DHT
// requirements. Only options that were explicitly set are checked; the first
// violation found is returned as an error, and nil means the config is valid.
func ValidateProvideConfig(cfg *Provide) error {
	dht := &cfg.DHT

	// Interval must not exceed the DHT provider record validity window and
	// must be non-negative ("0" disables periodic reprovide).
	if !dht.Interval.IsDefault() {
		iv := dht.Interval.WithDefault(DefaultProvideDHTInterval)
		if iv > amino.DefaultProvideValidity {
			return fmt.Errorf("Provide.DHT.Interval (%v) must be less than or equal to DHT provider record validity (%v)", iv, amino.DefaultProvideValidity)
		}
		if iv < 0 {
			return fmt.Errorf("Provide.DHT.Interval must be non-negative, got %v", iv)
		}
	}

	// The total worker pool must be strictly positive.
	if !dht.MaxWorkers.IsDefault() {
		if n := dht.MaxWorkers.WithDefault(DefaultProvideDHTMaxWorkers); n <= 0 {
			return fmt.Errorf("Provide.DHT.MaxWorkers must be positive, got %d", n)
		}
	}

	// Dedicated worker counts may be zero, but not negative.
	if !dht.DedicatedPeriodicWorkers.IsDefault() {
		if n := dht.DedicatedPeriodicWorkers.WithDefault(DefaultProvideDHTDedicatedPeriodicWorkers); n < 0 {
			return fmt.Errorf("Provide.DHT.DedicatedPeriodicWorkers must be non-negative, got %d", n)
		}
	}
	if !dht.DedicatedBurstWorkers.IsDefault() {
		if n := dht.DedicatedBurstWorkers.WithDefault(DefaultProvideDHTDedicatedBurstWorkers); n < 0 {
			return fmt.Errorf("Provide.DHT.DedicatedBurstWorkers must be non-negative, got %d", n)
		}
	}

	// Per-worker connection count and keystore batch size must be positive.
	if !dht.MaxProvideConnsPerWorker.IsDefault() {
		if n := dht.MaxProvideConnsPerWorker.WithDefault(DefaultProvideDHTMaxProvideConnsPerWorker); n <= 0 {
			return fmt.Errorf("Provide.DHT.MaxProvideConnsPerWorker must be positive, got %d", n)
		}
	}
	if !dht.KeystoreBatchSize.IsDefault() {
		if n := dht.KeystoreBatchSize.WithDefault(DefaultProvideDHTKeystoreBatchSize); n <= 0 {
			return fmt.Errorf("Provide.DHT.KeystoreBatchSize must be positive, got %d", n)
		}
	}

	// OfflineDelay may be zero, but not negative.
	if !dht.OfflineDelay.IsDefault() {
		if d := dht.OfflineDelay.WithDefault(DefaultProvideDHTOfflineDelay); d < 0 {
			return fmt.Errorf("Provide.DHT.OfflineDelay must be non-negative, got %v", d)
		}
	}

	return nil
}

107
config/provide_test.go Normal file
View File

@ -0,0 +1,107 @@
package config
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParseProvideStrategy checks token parsing, "+" combination, the
// deprecated "flat" alias, and that unknown tokens yield the zero strategy.
func TestParseProvideStrategy(t *testing.T) {
	cases := []struct {
		input  string
		expect ProvideStrategy
	}{
		{"all", ProvideStrategyAll},
		{"pinned", ProvideStrategyPinned},
		{"mfs", ProvideStrategyMFS},
		{"pinned+mfs", ProvideStrategyPinned | ProvideStrategyMFS},
		{"invalid", 0},
		{"all+invalid", ProvideStrategyAll},
		{"", ProvideStrategyAll},
		{"flat", ProvideStrategyAll}, // deprecated, maps to "all"
		{"flat+all", ProvideStrategyAll},
	}
	for _, tc := range cases {
		if got := ParseProvideStrategy(tc.input); got != tc.expect {
			t.Errorf("ParseProvideStrategy(%q) = %d, want %d", tc.input, got, tc.expect)
		}
	}
}
func TestValidateProvideConfig_Interval(t *testing.T) {
tests := []struct {
name string
interval time.Duration
wantErr bool
errMsg string
}{
{"valid default (22h)", 22 * time.Hour, false, ""},
{"valid max (48h)", 48 * time.Hour, false, ""},
{"valid small (1h)", 1 * time.Hour, false, ""},
{"valid zero (disabled)", 0, false, ""},
{"invalid over limit (49h)", 49 * time.Hour, true, "must be less than or equal to DHT provider record validity"},
{"invalid over limit (72h)", 72 * time.Hour, true, "must be less than or equal to DHT provider record validity"},
{"invalid negative", -1 * time.Hour, true, "must be non-negative"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Provide{
DHT: ProvideDHT{
Interval: NewOptionalDuration(tt.interval),
},
}
err := ValidateProvideConfig(cfg)
if tt.wantErr {
require.Error(t, err, "expected error for interval=%v", tt.interval)
if tt.errMsg != "" {
assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch")
}
} else {
require.NoError(t, err, "unexpected error for interval=%v", tt.interval)
}
})
}
}
func TestValidateProvideConfig_MaxWorkers(t *testing.T) {
tests := []struct {
name string
maxWorkers int64
wantErr bool
errMsg string
}{
{"valid default", 16, false, ""},
{"valid high", 100, false, ""},
{"valid low", 1, false, ""},
{"invalid zero", 0, true, "must be positive"},
{"invalid negative", -1, true, "must be positive"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Provide{
DHT: ProvideDHT{
MaxWorkers: NewOptionalInteger(tt.maxWorkers),
},
}
err := ValidateProvideConfig(cfg)
if tt.wantErr {
require.Error(t, err, "expected error for maxWorkers=%d", tt.maxWorkers)
if tt.errMsg != "" {
assert.Contains(t, err.Error(), tt.errMsg, "error message mismatch")
}
} else {
require.NoError(t, err, "unexpected error for maxWorkers=%d", tt.maxWorkers)
}
})
}
}

View File

@ -1,14 +1,16 @@
package config
const (
DefaultProviderEnabled = true
DefaultProviderWorkerCount = 16
)
// Provider configuration describes how NEW CIDs are announced the moment they are created.
// For periodical reprovide configuration, see Reprovider.*
// For periodical reprovide configuration, see Provide.*
//
// Deprecated: use Provide instead. This will be removed in a future release.
type Provider struct {
Enabled Flag `json:",omitempty"`
Strategy *OptionalString `json:",omitempty"` // Unused, you are likely looking for Reprovider.Strategy instead
WorkerCount *OptionalInteger `json:",omitempty"` // Number of concurrent provides allowed, 0 means unlimited
// Deprecated: use Provide.Enabled instead. This will be removed in a future release.
Enabled Flag `json:",omitempty"`
// Deprecated: unused, you are likely looking for Provide.Strategy instead. This will be removed in a future release.
Strategy *OptionalString `json:",omitempty"`
// Deprecated: use Provide.DHT.MaxWorkers instead. This will be removed in a future release.
WorkerCount *OptionalInteger `json:",omitempty"`
}

View File

@ -1,44 +1,13 @@
package config
import (
"strings"
"time"
)
const (
DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
DefaultReproviderStrategy = "all"
)
type ReproviderStrategy int
const (
ReproviderStrategyAll ReproviderStrategy = 1 << iota
ReproviderStrategyPinned
ReproviderStrategyRoots
ReproviderStrategyMFS
)
// Reprovider configuration describes how CID from local datastore are periodically re-announced to routing systems.
// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provider.*
// For provide behavior of ad-hoc or newly created CIDs and their first-time announcement, see Provide.*
//
// Deprecated: use Provide instead. This will be removed in a future release.
type Reprovider struct {
Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network
Strategy *OptionalString `json:",omitempty"` // Which keys to announce
}
// Deprecated: use Provide.DHT.Interval instead. This will be removed in a future release.
Interval *OptionalDuration `json:",omitempty"`
func ParseReproviderStrategy(s string) ReproviderStrategy {
var strategy ReproviderStrategy
for _, part := range strings.Split(s, "+") {
switch part {
case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
return ReproviderStrategyAll
case "pinned":
strategy |= ReproviderStrategyPinned
case "roots":
strategy |= ReproviderStrategyRoots
case "mfs":
strategy |= ReproviderStrategyMFS
}
}
return strategy
// Deprecated: use Provide.Strategy instead. This will be removed in a future release.
Strategy *OptionalString `json:",omitempty"`
}

View File

@ -1,27 +0,0 @@
package config
import "testing"
func TestParseReproviderStrategy(t *testing.T) {
tests := []struct {
input string
expect ReproviderStrategy
}{
{"all", ReproviderStrategyAll},
{"pinned", ReproviderStrategyPinned},
{"mfs", ReproviderStrategyMFS},
{"pinned+mfs", ReproviderStrategyPinned | ReproviderStrategyMFS},
{"invalid", 0},
{"all+invalid", ReproviderStrategyAll},
{"", ReproviderStrategyAll},
{"flat", ReproviderStrategyAll}, // deprecated, maps to "all"
{"flat+all", ReproviderStrategyAll},
}
for _, tt := range tests {
result := ParseReproviderStrategy(tt.input)
if result != tt.expect {
t.Errorf("ParseReproviderStrategy(%q) = %d, want %d", tt.input, result, tt.expect)
}
}
}

View File

@ -214,3 +214,57 @@ func getEnvOrDefault(key string, defaultValue []string) []string {
}
return defaultValue
}
// HasHTTPProviderConfigured reports whether the node is configured to use HTTP
// routers for providing content announcements. It is used when determining if
// the node can provide content even when not connected to libp2p peers.
//
// Note: right now delegated HTTP content providing is only supported with
// Routing.Type=custom and Routing.Routers configured according to:
// https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md#configuration-file-example
//
// This uses the `ProvideBitswap` request type that is not documented anywhere,
// because we hoped something like IPIP-378 (https://github.com/ipfs/specs/pull/378)
// would get finalized and we'd switch to that. It never happened due to politics,
// and now we are stuck with ProvideBitswap being the only API that works.
// Some people have reverse engineered it (example:
// https://discuss.ipfs.tech/t/only-peers-found-from-dht-seem-to-be-getting-used-as-relays-so-cant-use-http-routers/19545/9)
// and use it, so what we do here is the bare minimum to ensure their use case works
// using this old API until something better is available.
func (c *Config) HasHTTPProviderConfigured() bool {
	// Custom routers are a prerequisite; without them nothing can be HTTP-based.
	if len(c.Routing.Routers) == 0 {
		return false
	}
	// A router must be explicitly bound to the provide method.
	provideMethod, configured := c.Routing.Methods[MethodNameProvide]
	if !configured {
		return false
	}
	return c.routerSupportsHTTPProviding(provideMethod.RouterName)
}
// routerSupportsHTTPProviding reports whether the named custom router is an
// HTTP router, or — for parallel/sequential composite routers — transitively
// contains one. Unknown router names and non-composable parameters yield false.
func (c *Config) routerSupportsHTTPProviding(routerName string) bool {
	router, exists := c.Routing.Routers[routerName]
	if !exists {
		// Router configured for providing doesn't exist
		return false
	}
	if router.Type == RouterTypeHTTP {
		return true
	}
	if router.Type != RouterTypeParallel && router.Type != RouterTypeSequential {
		return false
	}
	// Composite router: recurse into children looking for an HTTP router.
	params, ok := router.Parameters.(*ComposableRouterParams)
	if !ok {
		return false
	}
	for _, child := range params.Routers {
		if c.routerSupportsHTTPProviding(child.RouterName) {
			return true
		}
	}
	return false
}

View File

@ -11,11 +11,13 @@ import (
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
"github.com/cheggaaa/pb"
"github.com/ipfs/boxo/files"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
"github.com/ipfs/boxo/verifcid"
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
coreiface "github.com/ipfs/kubo/core/coreiface"
@ -81,7 +83,7 @@ to form the IPFS MerkleDAG. Learn more: https://docs.ipfs.tech/concepts/merkle-d
If the daemon is not running, it will just add locally to the repo at $IPFS_PATH.
If the daemon is started later, it will be advertised after a few
seconds when the reprovider runs.
seconds when the provide system runs.
BASIC EXAMPLES:
@ -203,7 +205,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is safer. Default: Import.UnixFSHAMTDirectoryMaxFanout"),
// Experimental Features
cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. WARNING: experimental"),
cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. WARNING: experimental").WithDefault(32),
cmds.IntOption(inlineLimitOptionName, fmt.Sprintf("Maximum block size to inline. Maximum: %d bytes. WARNING: experimental", verifcid.DefaultMaxIdentityDigestSize)).WithDefault(32),
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. WARNING: experimental"),
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. WARNING: experimental"),
cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
@ -262,6 +264,19 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
hashFunStr, _ := req.Options[hashOptionName].(string)
inline, _ := req.Options[inlineOptionName].(bool)
inlineLimit, _ := req.Options[inlineLimitOptionName].(int)
// Validate inline-limit doesn't exceed the maximum identity digest size
if inline && inlineLimit > verifcid.DefaultMaxIdentityDigestSize {
return fmt.Errorf("inline-limit %d exceeds maximum allowed size of %d bytes", inlineLimit, verifcid.DefaultMaxIdentityDigestSize)
}
// Validate pin name
if pinNameSet {
if err := cmdutils.ValidatePinName(pinName); err != nil {
return err
}
}
toFilesStr, toFilesSet := req.Options[toFilesOptionName].(string)
preserveMode, _ := req.Options[preserveModeOptionName].(bool)
preserveMtime, _ := req.Options[preserveMtimeOptionName].(bool)

View File

@ -0,0 +1,50 @@
package cmdutils
import (
"strings"
"unicode"
)
const maxRunes = 128
// CleanAndTrim sanitizes untrusted strings from remote peers to prevent display issues
// across web UIs, terminals, and logs. It replaces control characters, format characters,
// and surrogates with U+FFFD (<28>), then enforces a maximum length of 128 runes.
//
// This follows the libp2p identify specification and RFC 9839 guidance:
// replacing problematic code points is preferred over deletion as deletion
// is a known security risk.
func CleanAndTrim(str string) string {
// Build sanitized result
var result []rune
for _, r := range str {
// Replace control characters (Cc) with U+FFFD - prevents terminal escapes, CR, LF, etc.
if unicode.Is(unicode.Cc, r) {
result = append(result, '\uFFFD')
continue
}
// Replace format characters (Cf) with U+FFFD - prevents RTL/LTR overrides, zero-width chars
if unicode.Is(unicode.Cf, r) {
result = append(result, '\uFFFD')
continue
}
// Replace surrogate characters (Cs) with U+FFFD - invalid in UTF-8
if unicode.Is(unicode.Cs, r) {
result = append(result, '\uFFFD')
continue
}
// Private use characters (Co) are preserved per spec
result = append(result, r)
}
// Convert to string and trim whitespace
sanitized := strings.TrimSpace(string(result))
// Enforce maximum length (128 runes, not bytes)
runes := []rune(sanitized)
if len(runes) > maxRunes {
return string(runes[:maxRunes])
}
return sanitized
}

View File

@ -13,6 +13,7 @@ import (
const (
AllowBigBlockOptionName = "allow-big-block"
SoftBlockLimit = 1024 * 1024 // https://github.com/ipfs/kubo/issues/7421#issuecomment-910833499
MaxPinNameBytes = 255 // Maximum number of bytes allowed for a pin name
)
var AllowBigBlockOption cmds.Option
@ -50,6 +51,21 @@ func CheckBlockSize(req *cmds.Request, size uint64) error {
return nil
}
// ValidatePinName validates that a pin name does not exceed the maximum
// allowed byte length. Returns an error if the name exceeds MaxPinNameBytes
// (255 bytes). Empty names are allowed.
func ValidatePinName(name string) error {
	if name == "" {
		// Empty names are allowed
		return nil
	}
	// len(name) on a Go string is already its byte length; the original
	// len([]byte(name)) allocated a throwaway copy for the same result.
	if n := len(name); n > MaxPinNameBytes {
		return fmt.Errorf("pin name is %d bytes (max %d bytes)", n, MaxPinNameBytes)
	}
	return nil
}
// PathOrCidPath returns a path.Path built from the argument. It keeps the old
// behaviour by building a path from a CID string.
func PathOrCidPath(str string) (path.Path, error) {

View File

@ -11,6 +11,8 @@ import (
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
humanize "github.com/dustin/go-humanize"
@ -35,6 +37,43 @@ import (
var flog = logging.Logger("cmds/files")
// Global counter for unflushed MFS operations
var noFlushOperationCounter atomic.Int64
// Cached limit value (read once on first use)
var (
noFlushLimit int64
noFlushLimitInit sync.Once
)
// updateNoFlushCounter tracks how many MFS write operations have run with
// --flush=false since the last flush. A flushing operation resets the counter
// to zero; a non-flushing operation increments it, and fails once the
// configured limit (Internal.MFSNoFlushLimit) has been reached, so the MFS
// in-memory directory caches cannot grow without bound.
func updateNoFlushCounter(nd *core.IpfsNode, flush bool) error {
	if flush {
		// Reset counter when flushing: persisted state starts the
		// unflushed-operation budget over.
		noFlushOperationCounter.Store(0)
		return nil
	}
	// Cache the limit on first use (config doesn't change at runtime).
	// Falls back to the compiled-in default when the repo config cannot be
	// read or the option is unset.
	noFlushLimitInit.Do(func() {
		noFlushLimit = int64(config.DefaultMFSNoFlushLimit)
		if cfg, err := nd.Repo.Config(); err == nil && cfg.Internal.MFSNoFlushLimit != nil {
			noFlushLimit = cfg.Internal.MFSNoFlushLimit.WithDefault(int64(config.DefaultMFSNoFlushLimit))
		}
	})
	// Check if limit reached. A limit <= 0 disables the check entirely.
	// NOTE(review): the Load below and the Add that follows are two separate
	// atomic operations, so concurrent callers can briefly push the counter
	// past the limit — presumably acceptable for a soft memory-growth guard,
	// but worth confirming.
	if noFlushLimit > 0 && noFlushOperationCounter.Load() >= noFlushLimit {
		return fmt.Errorf("reached limit of %d unflushed MFS operations. "+
			"To resolve: 1) run 'ipfs files flush' to persist changes, "+
			"2) use --flush=true (default), or "+
			"3) increase Internal.MFSNoFlushLimit in config", noFlushLimit)
	}
	noFlushOperationCounter.Add(1)
	return nil
}
// FilesCmd is the 'ipfs files' command
var FilesCmd = &cmds.Command{
Helptext: cmds.HelpText{
@ -64,13 +103,18 @@ defaults to true and ensures two things: 1) that the changes are reflected in
the full MFS structure (updated CIDs) 2) that the parent-folder's cache is
cleared. Use caution when setting this flag to false. It will improve
performance for large numbers of file operations, but it does so at the cost
of consistency guarantees and unbound growth of the directories' in-memory
caches. If the daemon is unexpectedly killed before running 'ipfs files
flush' on the files in question, then data may be lost. This also applies to
run 'ipfs repo gc' concurrently with '--flush=false' operations. We recommend
flushing paths regularly with 'ipfs files flush', specially the folders on
which many write operations are happening, as a way to clear the directory
cache, free memory and speed up read operations.`,
of consistency guarantees. If the daemon is unexpectedly killed before running
'ipfs files flush' on the files in question, then data may be lost. This also
applies to run 'ipfs repo gc' concurrently with '--flush=false' operations.
When using '--flush=false', operations are limited to prevent unbounded
memory growth. After reaching Internal.MFSNoFlushLimit operations, further
operations will fail until you run 'ipfs files flush'. This explicit failure
(instead of auto-flushing) ensures you maintain control over when data is
persisted, preventing unexpected partial states and making batch operations
predictable. We recommend flushing paths regularly, especially folders with
many write operations, to clear caches, free memory, and maintain good
performance.`,
},
Options: []cmds.Option{
cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true),
@ -513,12 +557,16 @@ being GC'ed.
}
}
flush, _ := req.Options[filesFlushOptionName].(bool)
if err := updateNoFlushCounter(nd, flush); err != nil {
return err
}
err = mfs.PutNode(nd.FilesRoot, dst, node)
if err != nil {
return fmt.Errorf("cp: cannot put node in path %s: %s", dst, err)
}
flush, _ := req.Options[filesFlushOptionName].(bool)
if flush {
if _, err := mfs.FlushPath(req.Context, nd.FilesRoot, dst); err != nil {
return fmt.Errorf("cp: cannot flush the created file %s: %s", dst, err)
@ -844,6 +892,10 @@ Example:
flush, _ := req.Options[filesFlushOptionName].(bool)
if err := updateNoFlushCounter(nd, flush); err != nil {
return err
}
src, err := checkPath(req.Arguments[0])
if err != nil {
return err
@ -981,6 +1033,10 @@ See '--to-files' in 'ipfs add --help' for more information.
flush, _ := req.Options[filesFlushOptionName].(bool)
rawLeaves, rawLeavesDef := req.Options[filesRawLeavesOptionName].(bool)
if err := updateNoFlushCounter(nd, flush); err != nil {
return err
}
if !rawLeavesDef && cfg.Import.UnixFSRawLeaves != config.Default {
rawLeavesDef = true
rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
@ -1109,6 +1165,10 @@ Examples:
flush, _ := req.Options[filesFlushOptionName].(bool)
if err := updateNoFlushCounter(n, flush); err != nil {
return err
}
prefix, err := getPrefix(req)
if err != nil {
return err
@ -1161,6 +1221,9 @@ are run with the '--flush=false'.
return err
}
// Reset the counter (flush always resets)
noFlushOperationCounter.Store(0)
return cmds.EmitOnce(res, &flushRes{enc.Encode(n.Cid())})
},
Type: flushRes{},
@ -1258,6 +1321,13 @@ Remove files or directories.
cmds.BoolOption(forceOptionName, "Forcibly remove target at path; implies -r for directories"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
// Check if user explicitly set --flush=false
if flushOpt, ok := req.Options[filesFlushOptionName]; ok {
if flush, ok := flushOpt.(bool); ok && !flush {
return fmt.Errorf("files rm always flushes for safety. The --flush flag cannot be set to false for this command")
}
}
nd, err := cmdenv.GetNode(env)
if err != nil {
return err

View File

@ -12,6 +12,7 @@ import (
version "github.com/ipfs/kubo"
"github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
cmds "github.com/ipfs/go-ipfs-cmds"
ke "github.com/ipfs/kubo/core/commands/keyencode"
@ -173,12 +174,14 @@ func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{
slices.Sort(info.Addresses)
protocols, _ := ps.GetProtocols(p) // don't care about errors here.
info.Protocols = append(info.Protocols, protocols...)
for _, proto := range protocols {
info.Protocols = append(info.Protocols, protocol.ID(cmdutils.CleanAndTrim(string(proto))))
}
slices.Sort(info.Protocols)
if v, err := ps.Get(p, "AgentVersion"); err == nil {
if vs, ok := v.(string); ok {
info.AgentVersion = vs
info.AgentVersion = cmdutils.CleanAndTrim(vs)
}
}

View File

@ -1,5 +1,4 @@
//go:build !windows && nofuse
// +build !windows,nofuse
package commands

View File

@ -1,5 +1,4 @@
//go:build !windows && !nofuse
// +build !windows,!nofuse
package commands

View File

@ -11,6 +11,7 @@ import (
bserv "github.com/ipfs/boxo/blockservice"
offline "github.com/ipfs/boxo/exchange/offline"
dag "github.com/ipfs/boxo/ipld/merkledag"
pin "github.com/ipfs/boxo/pinning/pinner"
verifcid "github.com/ipfs/boxo/verifcid"
cid "github.com/ipfs/go-cid"
cidenc "github.com/ipfs/go-cidutil/cidenc"
@ -99,6 +100,11 @@ It may take some time. Pass '--progress' to track the progress.
name, _ := req.Options[pinNameOptionName].(string)
showProgress, _ := req.Options[pinProgressOptionName].(bool)
// Validate pin name
if err := cmdutils.ValidatePinName(name); err != nil {
return err
}
if err := req.ParseBodyArgs(); err != nil {
return err
}
@ -370,18 +376,30 @@ Example:
return err
}
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}
if n.Pinning == nil {
return fmt.Errorf("pinning service not available")
}
typeStr, _ := req.Options[pinTypeOptionName].(string)
stream, _ := req.Options[pinStreamOptionName].(bool)
displayNames, _ := req.Options[pinNamesOptionName].(bool)
name, _ := req.Options[pinNameOptionName].(string)
switch typeStr {
case "all", "direct", "indirect", "recursive":
default:
err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
// Validate name filter
if err := cmdutils.ValidatePinName(name); err != nil {
return err
}
mode, ok := pin.StringToMode(typeStr)
if !ok {
return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
}
// For backward compatibility, we accumulate the pins in the same output type as before.
var emit func(PinLsOutputWrapper) error
lgcList := map[string]PinLsType{}
@ -397,7 +415,7 @@ Example:
}
if len(req.Arguments) > 0 {
err = pinLsKeys(req, typeStr, api, emit)
err = pinLsKeys(req, mode, displayNames || name != "", n.Pinning, api, emit)
} else {
err = pinLsAll(req, typeStr, displayNames || name != "", name, api, emit)
}
@ -482,23 +500,14 @@ type PinLsObject struct {
Type string `json:",omitempty"`
}
func pinLsKeys(req *cmds.Request, typeStr string, api coreiface.CoreAPI, emit func(value PinLsOutputWrapper) error) error {
func pinLsKeys(req *cmds.Request, mode pin.Mode, displayNames bool, pinner pin.Pinner, api coreiface.CoreAPI, emit func(value PinLsOutputWrapper) error) error {
enc, err := cmdenv.GetCidEncoder(req)
if err != nil {
return err
}
switch typeStr {
case "all", "direct", "indirect", "recursive":
default:
return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
}
opt, err := options.Pin.IsPinned.Type(typeStr)
if err != nil {
panic("unhandled pin type")
}
// Collect CIDs to check
cids := make([]cid.Cid, 0, len(req.Arguments))
for _, p := range req.Arguments {
p, err := cmdutils.PathOrCidPath(p)
if err != nil {
@ -510,25 +519,31 @@ func pinLsKeys(req *cmds.Request, typeStr string, api coreiface.CoreAPI, emit fu
return err
}
pinType, pinned, err := api.Pin().IsPinned(req.Context, rp, opt)
if err != nil {
return err
cids = append(cids, rp.RootCid())
}
// Check pins using the new type-specific method
pinned, err := pinner.CheckIfPinnedWithType(req.Context, mode, displayNames, cids...)
if err != nil {
return err
}
// Process results
for i, p := range pinned {
if !p.Pinned() {
return fmt.Errorf("path '%s' is not pinned", req.Arguments[i])
}
if !pinned {
return fmt.Errorf("path '%s' is not pinned", p)
}
switch pinType {
case "direct", "indirect", "recursive", "internal":
default:
pinType = "indirect through " + pinType
pinType, _ := pin.ModeToString(p.Mode)
if p.Mode == pin.Indirect && p.Via.Defined() {
pinType = "indirect through " + enc.Encode(p.Via)
}
err = emit(PinLsOutputWrapper{
PinLsObject: PinLsObject{
Type: pinType,
Cid: enc.Encode(rp.RootCid()),
Cid: enc.Encode(cids[i]),
Name: p.Name,
},
})
if err != nil {
@ -545,11 +560,9 @@ func pinLsAll(req *cmds.Request, typeStr string, detailed bool, name string, api
return err
}
switch typeStr {
case "all", "direct", "indirect", "recursive":
default:
err = fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
return err
_, ok := pin.StringToMode(typeStr)
if !ok {
return fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
}
opt, err := options.Pin.Ls.Type(typeStr)

View File

@ -171,6 +171,10 @@ NOTE: a comma-separated notation is supported in CLI for convenience:
opts := []pinclient.AddOption{}
if name, nameFound := req.Options[pinNameOptionName]; nameFound {
nameStr := name.(string)
// Validate pin name
if err := cmdutils.ValidatePinName(nameStr); err != nil {
return err
}
opts = append(opts, pinclient.PinOpts.WithName(nameStr))
}
@ -321,6 +325,11 @@ func lsRemote(ctx context.Context, req *cmds.Request, c *pinclient.Client, out c
opts := []pinclient.LsOption{}
if name, nameFound := req.Options[pinNameOptionName]; nameFound {
nameStr := name.(string)
// Validate name filter
if err := cmdutils.ValidatePinName(nameStr); err != nil {
close(out)
return err
}
opts = append(opts, pinclient.PinOpts.FilterName(nameStr))
}

View File

@ -1,6 +1,7 @@
package commands
import (
"errors"
"fmt"
"io"
"text/tabwriter"
@ -44,12 +45,12 @@ var provideClearCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Clear all CIDs from the provide queue.",
ShortDescription: `
Clear all CIDs from the reprovide queue.
Clear all CIDs pending to be provided for the first time.
Note: Kubo will automatically clear the queue when it detects a change of
Reprovider.Strategy upon a restart. For more information about reprovider
Provide.Strategy upon a restart. For more information about provide
strategies, see:
https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy
https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
`,
},
Options: []cmds.Option{
@ -99,8 +100,8 @@ var provideStatCmd = &cmds.Command{
Tagline: "Returns statistics about the node's provider system.",
ShortDescription: `
Returns statistics about the content the node is reproviding every
Reprovider.Interval according to Reprovider.Strategy:
https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider
Provide.DHT.Interval according to Provide.Strategy:
https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
This interface is not stable and may change from release to release.
@ -118,7 +119,12 @@ This interface is not stable and may change from release to release.
return ErrNotOnline
}
stats, err := nd.Provider.Stat()
provideSys, ok := nd.Provider.(provider.System)
if !ok {
return errors.New("stats not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
}
stats, err := provideSys.Stat()
if err != nil {
return err
}

View File

@ -11,6 +11,8 @@ import (
"github.com/ipfs/kubo/config"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/node"
mh "github.com/multiformats/go-multihash"
dag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipns"
@ -164,14 +166,19 @@ var provideRefRoutingCmd = &cmds.Command{
if err != nil {
return err
}
if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
return errors.New("invalid configuration: Provider.Enabled is set to 'false'")
if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
return errors.New("invalid configuration: Provide.Enabled is set to 'false'")
}
if len(nd.PeerHost.Network().Conns()) == 0 {
if len(nd.PeerHost.Network().Conns()) == 0 && !cfg.HasHTTPProviderConfigured() {
// Node is depending on DHT for providing (no custom HTTP provider
// configured) and currently has no connected peers.
return errors.New("cannot provide, no connected peers")
}
// If we reach here with no connections but HTTP provider configured,
// we proceed with the provide operation via HTTP
// Needed to parse stdin args.
// TODO: Lazy Load
err = req.ParseBodyArgs()
@ -207,9 +214,9 @@ var provideRefRoutingCmd = &cmds.Command{
go func() {
defer cancel()
if rec {
provideErr = provideKeysRec(ctx, nd.Routing, nd.DAG, cids)
provideErr = provideCidsRec(ctx, nd.Provider, nd.DAG, cids)
} else {
provideErr = provideKeys(ctx, nd.Routing, cids)
provideErr = provideCids(nd.Provider, cids)
}
if provideErr != nil {
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
@ -268,14 +275,18 @@ Trigger reprovider to announce our data to network.
if err != nil {
return err
}
if !cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) {
return errors.New("invalid configuration: Provider.Enabled is set to 'false'")
if !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) {
return errors.New("invalid configuration: Provide.Enabled is set to 'false'")
}
if cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) == 0 {
return errors.New("invalid configuration: Reprovider.Interval is set to '0'")
if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'")
}
provideSys, ok := nd.Provider.(*node.LegacyProvider)
if !ok {
return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
}
err = nd.Provider.Reprovide(req.Context)
err = provideSys.Reprovide(req.Context)
if err != nil {
return err
}
@ -284,39 +295,25 @@ Trigger reprovider to announce our data to network.
},
}
func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error {
for _, c := range cids {
err := r.Provide(ctx, c, true)
if err != nil {
return err
}
func provideCids(prov node.DHTProvider, cids []cid.Cid) error {
mhs := make([]mh.Multihash, len(cids))
for i, c := range cids {
mhs[i] = c.Hash()
}
return nil
return prov.StartProviding(true, mhs...)
}
func provideKeysRec(ctx context.Context, r routing.Routing, dserv ipld.DAGService, cids []cid.Cid) error {
provided := cid.NewSet()
func provideCidsRec(ctx context.Context, prov node.DHTProvider, dserv ipld.DAGService, cids []cid.Cid) error {
for _, c := range cids {
kset := cid.NewSet()
err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, kset.Visit)
if err != nil {
return err
}
for _, k := range kset.Keys() {
if provided.Has(k) {
continue
}
err = r.Provide(ctx, k, true)
if err != nil {
return err
}
provided.Add(k)
if err = provideCids(prov, kset.Keys()); err != nil {
return err
}
}
return nil
}

View File

@ -7,6 +7,7 @@ import (
"time"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
cmds "github.com/ipfs/go-ipfs-cmds"
dht "github.com/libp2p/go-libp2p-kad-dht"
@ -92,7 +93,9 @@ This interface is not stable and may change from release to release.
info := dhtPeerInfo{ID: p.String()}
if ver, err := nd.Peerstore.Get(p, "AgentVersion"); err == nil {
info.AgentVersion, _ = ver.(string)
if vs, ok := ver.(string); ok {
info.AgentVersion = cmdutils.CleanAndTrim(vs)
}
} else if err == pstore.ErrNotFound {
// ignore
} else {
@ -143,7 +146,9 @@ This interface is not stable and may change from release to release.
info := dhtPeerInfo{ID: pi.Id.String()}
if ver, err := nd.Peerstore.Get(pi.Id, "AgentVersion"); err == nil {
info.AgentVersion, _ = ver.(string)
if vs, ok := ver.(string); ok {
info.AgentVersion = cmdutils.CleanAndTrim(vs)
}
} else if err == pstore.ErrNotFound {
// ignore
} else {

View File

@ -18,6 +18,7 @@ import (
"github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
"github.com/ipfs/kubo/core/node/libp2p"
"github.com/ipfs/kubo/repo"
"github.com/ipfs/kubo/repo/fsrepo"
@ -27,6 +28,7 @@ import (
inet "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
pstore "github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/protocol"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
@ -290,7 +292,7 @@ var swarmPeersCmd = &cmds.Command{
}
for _, s := range strs {
ci.Streams = append(ci.Streams, streamInfo{Protocol: string(s)})
ci.Streams = append(ci.Streams, streamInfo{Protocol: cmdutils.CleanAndTrim(string(s))})
}
}
@ -476,13 +478,15 @@ func (ci *connInfo) identifyPeer(ps pstore.Peerstore, p peer.ID) (IdOutput, erro
slices.Sort(info.Addresses)
if protocols, err := ps.GetProtocols(p); err == nil {
info.Protocols = append(info.Protocols, protocols...)
for _, proto := range protocols {
info.Protocols = append(info.Protocols, protocol.ID(cmdutils.CleanAndTrim(string(proto))))
}
slices.Sort(info.Protocols)
}
if v, err := ps.Get(p, "AgentVersion"); err == nil {
if vs, ok := v.(string); ok {
info.AgentVersion = vs
info.AgentVersion = cmdutils.CleanAndTrim(vs)
}
}

View File

@ -2,14 +2,13 @@ package commands
import (
"os"
"path"
"runtime"
"github.com/ipfs/go-ipfs-cmds"
version "github.com/ipfs/kubo"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
cmds "github.com/ipfs/go-ipfs-cmds"
manet "github.com/multiformats/go-multiaddr/net"
sysi "github.com/whyrusleeping/go-sysinfo"
)
@ -84,32 +83,28 @@ func runtimeInfo(out map[string]interface{}) error {
// envVarInfo records the environment variables relevant to the node —
// GOPATH and the repo-path variable named by config.EnvDir — under the
// "environment" key of out. Always returns nil.
func envVarInfo(out map[string]interface{}) error {
	ev := make(map[string]interface{})
	ev["GOPATH"] = os.Getenv("GOPATH")
	// Use config.EnvDir as the canonical repo-path variable name; the
	// hard-coded duplicate `ev["IPFS_PATH"]` line (diff residue from the
	// pre-refactor version) is removed.
	ev[config.EnvDir] = os.Getenv(config.EnvDir)
	out["environment"] = ev
	return nil
}
func ipfsPath() string {
p := os.Getenv("IPFS_PATH")
if p == "" {
p = path.Join(os.Getenv("HOME"), ".ipfs")
}
return p
}
func diskSpaceInfo(out map[string]interface{}) error {
di := make(map[string]interface{})
dinfo, err := sysi.DiskUsage(ipfsPath())
pathRoot, err := config.PathRoot()
if err != nil {
return err
}
dinfo, err := sysi.DiskUsage(pathRoot)
if err != nil {
return err
}
di["fstype"] = dinfo.FsType
di["total_space"] = dinfo.Total
di["free_space"] = dinfo.Free
out["diskinfo"] = map[string]interface{}{
"fstype": dinfo.FsType,
"total_space": dinfo.Total,
"free_space": dinfo.Free,
}
out["diskinfo"] = di
return nil
}

View File

@ -92,31 +92,31 @@ type IpfsNode struct {
RecordValidator record.Validator
// Online
PeerHost p2phost.Host `optional:"true"` // the network host (server+client)
Peering *peering.PeeringService `optional:"true"`
Filters *ma.Filters `optional:"true"`
Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper
Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht
ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system
DNSResolver *madns.Resolver // the DNS resolver
IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver
UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver
OfflineIPLDPathResolver pathresolver.Resolver `name:"offlineIpldPathResolver"` // The IPLD path resolver that uses only locally available blocks
OfflineUnixFSPathResolver pathresolver.Resolver `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks
Exchange exchange.Interface // the block exchange + strategy
Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance
Namesys namesys.NameSystem // the name system, resolves paths to hashes
Provider provider.System // the value provider system
ProvidingStrategy config.ReproviderStrategy `optional:"true"`
ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"`
IpnsRepub *ipnsrp.Republisher `optional:"true"`
ResourceManager network.ResourceManager `optional:"true"`
PeerHost p2phost.Host `optional:"true"` // the network host (server+client)
Peering *peering.PeeringService `optional:"true"`
Filters *ma.Filters `optional:"true"`
Bootstrapper io.Closer `optional:"true"` // the periodic bootstrapper
ContentDiscovery routing.ContentDiscovery `optional:"true"` // the discovery part of the routing system
DNSResolver *madns.Resolver // the DNS resolver
IPLDPathResolver pathresolver.Resolver `name:"ipldPathResolver"` // The IPLD path resolver
UnixFSPathResolver pathresolver.Resolver `name:"unixFSPathResolver"` // The UnixFS path resolver
OfflineIPLDPathResolver pathresolver.Resolver `name:"offlineIpldPathResolver"` // The IPLD path resolver that uses only locally available blocks
OfflineUnixFSPathResolver pathresolver.Resolver `name:"offlineUnixFSPathResolver"` // The UnixFS path resolver that uses only locally available blocks
Exchange exchange.Interface // the block exchange + strategy
Bitswap *bitswap.Bitswap `optional:"true"` // The Bitswap instance
Namesys namesys.NameSystem // the name system, resolves paths to hashes
ProvidingStrategy config.ProvideStrategy `optional:"true"`
ProvidingKeyChanFunc provider.KeyChanFunc `optional:"true"`
IpnsRepub *ipnsrp.Republisher `optional:"true"`
ResourceManager network.ResourceManager `optional:"true"`
PubSub *pubsub.PubSub `optional:"true"`
PSRouter *psrouter.PubsubValueStore `optional:"true"`
DHT *ddht.DHT `optional:"true"`
DHTClient routing.Routing `name:"dhtc" optional:"true"`
Routing irouting.ProvideManyRouter `optional:"true"` // the routing system. recommend ipfs-dht
Provider node.DHTProvider // the value provider system
DHT *ddht.DHT `optional:"true"`
DHTClient routing.Routing `name:"dhtc" optional:"true"`
P2P *p2p.P2P `optional:"true"`

View File

@ -23,10 +23,8 @@ import (
dag "github.com/ipfs/boxo/ipld/merkledag"
pathresolver "github.com/ipfs/boxo/path/resolver"
pin "github.com/ipfs/boxo/pinning/pinner"
provider "github.com/ipfs/boxo/provider"
offlineroute "github.com/ipfs/boxo/routing/offline"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/core/coreiface/options"
@ -45,8 +43,6 @@ import (
"github.com/ipfs/kubo/repo"
)
var log = logging.Logger("coreapi")
type CoreAPI struct {
nctx context.Context
@ -73,8 +69,8 @@ type CoreAPI struct {
ipldPathResolver pathresolver.Resolver
unixFSPathResolver pathresolver.Resolver
provider provider.System
providingStrategy config.ReproviderStrategy
provider node.DHTProvider
providingStrategy config.ProvideStrategy
pubSub *pubsub.PubSub

View File

@ -15,9 +15,10 @@ import (
cidutil "github.com/ipfs/go-cidutil"
coreiface "github.com/ipfs/kubo/core/coreiface"
caopts "github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/core/node"
"github.com/ipfs/kubo/tracing"
peer "github.com/libp2p/go-libp2p/core/peer"
routing "github.com/libp2p/go-libp2p/core/routing"
mh "github.com/multiformats/go-multihash"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@ -148,9 +149,9 @@ func (api *RoutingAPI) Provide(ctx context.Context, path path.Path, opts ...caop
}
if settings.Recursive {
err = provideKeysRec(ctx, api.routing, api.blockstore, []cid.Cid{c})
err = provideKeysRec(ctx, api.provider, api.blockstore, []cid.Cid{c})
} else {
err = provideKeys(ctx, api.routing, []cid.Cid{c})
err = api.provider.StartProviding(false, c.Hash())
}
if err != nil {
return err
@ -159,41 +160,64 @@ func (api *RoutingAPI) Provide(ctx context.Context, path path.Path, opts ...caop
return nil
}
func provideKeys(ctx context.Context, r routing.Routing, cids []cid.Cid) error {
for _, c := range cids {
err := r.Provide(ctx, c, true)
if err != nil {
return err
}
}
return nil
}
func provideKeysRec(ctx context.Context, r routing.Routing, bs blockstore.Blockstore, cids []cid.Cid) error {
func provideKeysRec(ctx context.Context, prov node.DHTProvider, bs blockstore.Blockstore, cids []cid.Cid) error {
provided := cidutil.NewStreamingSet()
errCh := make(chan error)
// Error channel with buffer size 1 to avoid blocking the goroutine
errCh := make(chan error, 1)
go func() {
// Always close provided.New to signal completion
defer close(provided.New)
// Also close error channel to distinguish between "no error" and "pending error"
defer close(errCh)
dserv := dag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
for _, c := range cids {
err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx))
if err != nil {
errCh <- err
if err := dag.Walk(ctx, dag.GetLinksDirect(dserv), c, provided.Visitor(ctx)); err != nil {
// Send error to channel. If context is cancelled while trying to send,
// exit immediately as the main loop will return ctx.Err()
select {
case errCh <- err:
// Error sent successfully, exit goroutine
case <-ctx.Done():
// Context cancelled, exit without sending error
return
}
return
}
}
// All CIDs walked successfully, goroutine will exit and channels will close
}()
keys := make([]mh.Multihash, 0)
for {
select {
case k := <-provided.New:
err := r.Provide(ctx, k, true)
if err != nil {
return err
}
case err := <-errCh:
return err
case <-ctx.Done():
// Context cancelled, return immediately
return ctx.Err()
case err := <-errCh:
// Received error from DAG walk, return it
return err
case c, ok := <-provided.New:
if !ok {
// Channel closed means goroutine finished.
// CRITICAL: Check for any error that was sent just before channel closure.
// This handles the race where error is sent to errCh but main loop
// sees provided.New close first.
select {
case err := <-errCh:
if err != nil {
return err
}
// errCh closed with nil, meaning success
default:
// No pending error in errCh
}
// All CIDs successfully processed, start providing
return prov.StartProviding(true, keys...)
}
// Accumulate the CID for providing
keys = append(keys, c.Hash())
}
}
}

View File

@ -72,7 +72,7 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity
c.AutoTLS.Enabled = config.False // disable so no /ws listener is added
// For provider tests, avoid that content gets
// auto-provided without calling "provide" (unless pinned).
c.Reprovider.Strategy = config.NewOptionalString("roots")
c.Provide.Strategy = config.NewOptionalString("roots")
ds := syncds.MutexWrap(datastore.NewMapDatastore())
r := &repo.Mock{

View File

@ -16,21 +16,25 @@ import (
uio "github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/boxo/provider"
cid "github.com/ipfs/go-cid"
cidutil "github.com/ipfs/go-cidutil"
ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
options "github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/core/coreunix"
"github.com/ipfs/kubo/tracing"
mh "github.com/multiformats/go-multihash"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
var log = logging.Logger("coreapi")
type UnixfsAPI CoreAPI
// Add builds a merkledag node from a reader, adds it to the blockstore,
@ -116,7 +120,7 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
// nor by the pinner (the pinner doesn't traverse the pinned DAG itself, it only
// handles roots). This wrapping ensures all blocks of pinned content get provided.
if settings.Pin && !settings.OnlyHash &&
(api.providingStrategy&config.ReproviderStrategyPinned) != 0 {
(api.providingStrategy&config.ProvideStrategyPinned) != 0 {
dserv = &providingDagService{dserv, api.provider}
}
@ -386,7 +390,7 @@ func (s *syncDagService) Sync() error {
type providingDagService struct {
ipld.DAGService
provider provider.System
provider.MultihashProvider
}
func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error {
@ -397,8 +401,8 @@ func (pds *providingDagService) Add(ctx context.Context, n ipld.Node) error {
// We don't want DAG operations to fail due to providing issues.
// The user's data is still stored successfully even if the
// announcement to the routing system fails temporarily.
if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil {
log.Error(err)
if err := pds.StartProviding(false, n.Cid().Hash()); err != nil {
log.Errorf("failed to provide new block: %s", err)
}
return nil
}
@ -407,14 +411,13 @@ func (pds *providingDagService) AddMany(ctx context.Context, nds []ipld.Node) er
if err := pds.DAGService.AddMany(ctx, nds); err != nil {
return err
}
keys := make([]mh.Multihash, len(nds))
for i, n := range nds {
keys[i] = n.Cid().Hash()
}
// Same error handling philosophy as Add(): log but don't fail.
// Note: Provide calls are intentionally blocking here - the Provider
// implementation should handle concurrency/queuing internally.
for _, n := range nds {
if err := pds.provider.Provide(ctx, n.Cid(), true); err != nil {
log.Error(err)
break
}
if err := pds.StartProviding(false, keys...); err != nil {
log.Errorf("failed to provide new blocks: %s", err)
}
return nil
}

View File

@ -107,11 +107,13 @@ func Libp2pGatewayOption() ServeOption {
// Keep these constraints for security
DeserializedResponses: false, // Trustless-only
NoDNSLink: true, // No DNS resolution
DisableHTMLErrors: true, // Plain text errors only
PublicGateways: nil,
Menu: nil,
// Apply timeout and concurrency limits from user config
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true
}
handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend})
@ -270,6 +272,7 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL),
}
// Add default implicit known gateways, such as subdomain gateway on localhost.

View File

@ -1,19 +1,31 @@
package corehttp
import (
"fmt"
"net"
"net/http"
"strings"
"github.com/ipfs/go-cid"
"github.com/ipfs/kubo/config"
core "github.com/ipfs/kubo/core"
)
// WebUI version confirmed to work with this Kubo version
const WebUIPath = "/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu" // v4.8.0
const WebUIPath = "/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u" // v4.9.1
// WebUIPaths is a list of all past webUI paths.
var WebUIPaths = []string{
WebUIPath,
"/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu", // v4.8.0
"/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0
"/ipfs/bafybeibpaa5kqrj4gkemiswbwndjqiryl65cks64ypwtyerxixu56gnvvm", // v4.6.0
"/ipfs/bafybeiata4qg7xjtwgor6r5dw63jjxyouenyromrrb4lrewxrlvav7gzgi", // v4.5.0
"/ipfs/bafybeigp3zm7cqoiciqk5anlheenqjsgovp7j7zq6hah4nu6iugdgb4nby", // v4.4.2
"/ipfs/bafybeiatztgdllxnp5p6zu7bdwhjmozsmd7jprff4bdjqjljxtylitvss4", // v4.4.1
"/ipfs/bafybeibgic2ex3fvzkinhy6k6aqyv3zy2o7bkbsmrzvzka24xetv7eeadm",
"/ipfs/bafybeid4uxz7klxcu3ffsnmn64r7ihvysamlj4ohl5h2orjsffuegcpaeq",
"/ipfs/bafybeif6abowqcavbkz243biyh7pde7ick5kkwwytrh7pd2hkbtuqysjxy",
"/ipfs/bafybeibgic2ex3fvzkinhy6k6aqyv3zy2o7bkbsmrzvzka24xetv7eeadm", // v4.4.0
"/ipfs/bafybeid4uxz7klxcu3ffsnmn64r7ihvysamlj4ohl5h2orjsffuegcpaeq", // v4.3.3
"/ipfs/bafybeif6abowqcavbkz243biyh7pde7ick5kkwwytrh7pd2hkbtuqysjxy", // v4.3.2
"/ipfs/bafybeihatzsgposbr3hrngo42yckdyqcc56yean2rynnwpzxstvdlphxf4",
"/ipfs/bafybeigggyffcf6yfhx5irtwzx3cgnk6n3dwylkvcpckzhqqrigsxowjwe",
"/ipfs/bafybeidf7cpkwsjkq6xs3r6fbbxghbugilx3jtezbza7gua3k5wjixpmba",
@ -22,18 +34,18 @@ var WebUIPaths = []string{
"/ipfs/bafybeicyp7ssbnj3hdzehcibmapmpuc3atrsc4ch3q6acldfh4ojjdbcxe",
"/ipfs/bafybeigs6d53gpgu34553mbi5bbkb26e4ikruoaaar75jpfdywpup2r3my",
"/ipfs/bafybeic4gops3d3lyrisqku37uio33nvt6fqxvkxihrwlqsuvf76yln4fm",
"/ipfs/bafybeifeqt7mvxaniphyu2i3qhovjaf3sayooxbh5enfdqtiehxjv2ldte",
"/ipfs/bafybeifeqt7mvxaniphyu2i3qhovjaf3sayooxbh5enfdqtiehxjv2ldte", // v2.22.0
"/ipfs/bafybeiequgo72mrvuml56j4gk7crewig5bavumrrzhkqbim6b3s2yqi7ty",
"/ipfs/bafybeibjbq3tmmy7wuihhhwvbladjsd3gx3kfjepxzkq6wylik6wc3whzy",
"/ipfs/bafybeiavrvt53fks6u32n5p2morgblcmck4bh4ymf4rrwu7ah5zsykmqqa",
"/ipfs/bafybeiageaoxg6d7npaof6eyzqbwvbubyler7bq44hayik2hvqcggg7d2y",
"/ipfs/bafybeidb5eryh72zajiokdggzo7yct2d6hhcflncji5im2y5w26uuygdsm",
"/ipfs/bafybeibozpulxtpv5nhfa2ue3dcjx23ndh3gwr5vwllk7ptoyfwnfjjr4q",
"/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu",
"/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva",
"/ipfs/bafybeibjbq3tmmy7wuihhhwvbladjsd3gx3kfjepxzkq6wylik6wc3whzy", // v2.20.0
"/ipfs/bafybeiavrvt53fks6u32n5p2morgblcmck4bh4ymf4rrwu7ah5zsykmqqa", // v2.19.0
"/ipfs/bafybeiageaoxg6d7npaof6eyzqbwvbubyler7bq44hayik2hvqcggg7d2y", // v2.18.1
"/ipfs/bafybeidb5eryh72zajiokdggzo7yct2d6hhcflncji5im2y5w26uuygdsm", // v2.18.0
"/ipfs/bafybeibozpulxtpv5nhfa2ue3dcjx23ndh3gwr5vwllk7ptoyfwnfjjr4q", // v2.15.1
"/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu", // v2.15.0
"/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva", // v2.13.0
"/ipfs/bafybeiflkjt66aetfgcrgvv75izymd5kc47g6luepqmfq6zsf5w6ueth6y",
"/ipfs/bafybeid26vjplsejg7t3nrh7mxmiaaxriebbm4xxrxxdunlk7o337m5sqq",
"/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i",
"/ipfs/bafybeif4zkmu7qdhkpf3pnhwxipylqleof7rl6ojbe7mq3fzogz6m4xk3i", // v2.11.4
"/ipfs/bafybeianwe4vy7sprht5sm3hshvxjeqhwcmvbzq73u55sdhqngmohkjgs4",
"/ipfs/bafybeicitin4p7ggmyjaubqpi3xwnagrwarsy6hiihraafk5rcrxqxju6m",
"/ipfs/bafybeihpetclqvwb4qnmumvcn7nh4pxrtugrlpw4jgjpqicdxsv7opdm6e",
@ -72,4 +84,85 @@ var WebUIPaths = []string{
"/ipfs/Qmexhq2sBHnXQbvyP2GfUdbnY7HCagH2Mw5vUNSBn2nxip",
}
var WebUIOption = RedirectOption("webui", WebUIPath)
// WebUIOption registers the WebUI handler on the RPC API mux under /webui/.
// It reads the node's repo config to determine how the handler behaves:
// extra API HTTP headers, Gateway.NoFetch, and Gateway.DeserializedResponses.
func WebUIOption(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
	conf, err := n.Repo.Config()
	if err != nil {
		return nil, err
	}
	h := &webUIHandler{
		node:                  n,
		headers:               conf.API.HTTPHeaders,
		noFetch:               conf.Gateway.NoFetch,
		deserializedResponses: conf.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses),
	}
	mux.Handle("/webui/", h)
	return mux, nil
}
// webUIHandler serves /webui/ on the RPC API: it checks the gateway
// configuration and local availability before redirecting to WebUIPath.
type webUIHandler struct {
	headers map[string][]string // extra response headers from API.HTTPHeaders
	node *core.IpfsNode // used to probe the local blockstore when noFetch is set
	noFetch bool // Gateway.NoFetch: node will not fetch missing WebUI blocks
	deserializedResponses bool // Gateway.DeserializedResponses (with default applied)
}
// ServeHTTP handles requests to /webui/.
//
// It applies the configured API HTTP headers, then either:
//   - responds 503 when Gateway.DeserializedResponses=false (the WebUI
//     cannot render over trustless-only responses),
//   - responds 503 when Gateway.NoFetch=true and the WebUI root block is
//     not already present in the local blockstore,
//   - otherwise redirects (302) to the versioned WebUI path.
func (h *webUIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	for name, values := range h.headers {
		w.Header()[http.CanonicalHeaderKey(name)] = values
	}
	// The WebUI needs deserialized gateway responses to render at all.
	if !h.deserializedResponses {
		h.writeIncompatibleError(w)
		return
	}
	// With Gateway.NoFetch the node won't retrieve the WebUI from the
	// network, so only redirect when the root block is already local.
	if h.noFetch {
		webUICID, err := cid.Parse(strings.TrimPrefix(WebUIPath, "/ipfs/"))
		if err != nil {
			// Should never happen: WebUIPath is a hardcoded constant.
			log.Errorf("failed to parse WebUI CID: %v", err)
		} else if has, err := h.node.Blockstore.Has(r.Context(), webUICID); err != nil {
			log.Debugf("error checking WebUI availability: %v", err)
		} else if !has {
			h.writeNotAvailableError(w)
			return
		}
	}
	// Default behavior: redirect to the WebUI path
	http.Redirect(w, r, WebUIPath, http.StatusFound)
}
// writeIncompatibleError responds with 503 Service Unavailable and a
// plain-text explanation that the WebUI cannot work while
// Gateway.DeserializedResponses=false.
func (h *webUIHandler) writeIncompatibleError(w http.ResponseWriter) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(http.StatusServiceUnavailable)
	fmt.Fprintf(w, `IPFS WebUI Incompatible
WebUI is not compatible with Gateway.DeserializedResponses=false.
The WebUI requires deserializing IPFS responses to render the interface.
To use the WebUI, set Gateway.DeserializedResponses=true in your config.
`)
}
// writeNotAvailableError responds with 503 Service Unavailable and a
// plain-text message explaining that Gateway.NoFetch=true prevents the node
// from fetching the WebUI, along with two ways to make it available locally.
func (h *webUIHandler) writeNotAvailableError(w http.ResponseWriter) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(http.StatusServiceUnavailable)
	fmt.Fprintf(w, `IPFS WebUI Not Available
WebUI at %s is not in your local node due to Gateway.NoFetch=true.
To use the WebUI, either:
1. Run: ipfs pin add --progress --name ipfs-webui %s
2. Download from https://github.com/ipfs/ipfs-webui/releases and import with: ipfs dag import ipfs-webui.car
`, WebUIPath, WebUIPath)
}

View File

@ -12,6 +12,7 @@ import (
ipld "github.com/ipfs/go-ipld-format"
iface "github.com/ipfs/kubo/core/coreiface"
opt "github.com/ipfs/kubo/core/coreiface/options"
"github.com/stretchr/testify/require"
)
func (tp *TestSuite) TestPin(t *testing.T) {
@ -28,6 +29,7 @@ func (tp *TestSuite) TestPin(t *testing.T) {
t.Run("TestPinLsIndirect", tp.TestPinLsIndirect)
t.Run("TestPinLsPrecedence", tp.TestPinLsPrecedence)
t.Run("TestPinIsPinned", tp.TestPinIsPinned)
t.Run("TestPinNames", tp.TestPinNames)
}
func (tp *TestSuite) TestPinAdd(t *testing.T) {
@ -580,6 +582,145 @@ func assertIsPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path
}
}
// TestPinNames exercises pin-name behavior across the Pin API: adding pins
// with and without names, name preservation on Update, name replacement on
// re-pin, direct pins with names, and that names are only returned when the
// Detailed listing option is set.
func (tp *TestSuite) TestPinNames(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	api, err := tp.makeAPI(t, ctx)
	require.NoError(t, err)

	// listPins drains a single Pin().Ls listing into a path->name map.
	// Ls runs in a goroutine and reports its error over a buffered channel:
	// the previous pattern of assigning to a shared `err` from the goroutine
	// raced with the main goroutine, which could read `err` as soon as the
	// pins channel was closed (before the goroutine's assignment).
	listPins := func(opts ...opt.PinLsOption) (map[string]string, error) {
		pins := make(chan iface.Pin)
		errCh := make(chan error, 1)
		go func() {
			errCh <- api.Pin().Ls(ctx, pins, opts...)
		}()
		names := make(map[string]string)
		for pin := range pins {
			names[pin.Path().String()] = pin.Name()
		}
		return names, <-errCh
	}

	// Create test content
	p1, err := api.Unixfs().Add(ctx, strFile("content1")())
	require.NoError(t, err)
	p2, err := api.Unixfs().Add(ctx, strFile("content2")())
	require.NoError(t, err)
	p3, err := api.Unixfs().Add(ctx, strFile("content3")())
	require.NoError(t, err)
	p4, err := api.Unixfs().Add(ctx, strFile("content4")())
	require.NoError(t, err)

	// Test 1: Pin with name
	err = api.Pin().Add(ctx, p1, opt.Pin.Name("test-pin-1"))
	require.NoError(t, err, "failed to add pin with name")

	// Test 2: Pin without name
	err = api.Pin().Add(ctx, p2)
	require.NoError(t, err, "failed to add pin without name")

	// Test 3: List pins with detailed option to get names
	pinMap, err := listPins(opt.Pin.Ls.Detailed(true))
	require.NoError(t, err, "failed to list pins with names")

	// Verify pin names
	name1, ok := pinMap[p1.String()]
	require.True(t, ok, "pin for %s not found", p1)
	require.Equal(t, "test-pin-1", name1, "unexpected pin name for %s", p1)
	name2, ok := pinMap[p2.String()]
	require.True(t, ok, "pin for %s not found", p2)
	require.Empty(t, name2, "expected empty pin name for %s, got '%s'", p2, name2)

	// Test 4: Pin update preserves name
	err = api.Pin().Add(ctx, p3, opt.Pin.Name("updatable-pin"))
	require.NoError(t, err, "failed to add pin with name for update test")
	err = api.Pin().Update(ctx, p3, p4)
	require.NoError(t, err, "failed to update pin")

	// Verify name was preserved after update
	updatedPinMap, err := listPins(opt.Pin.Ls.Detailed(true))
	require.NoError(t, err, "failed to list pins after update")

	// Old pin should not exist
	_, oldExists := updatedPinMap[p3.String()]
	require.False(t, oldExists, "old pin %s should not exist after update", p3)

	// New pin should have the preserved name
	name4, ok := updatedPinMap[p4.String()]
	require.True(t, ok, "updated pin for %s not found", p4)
	require.Equal(t, "updatable-pin", name4, "pin name not preserved after update from %s to %s", p3, p4)

	// Test 5: Re-pinning with different name updates the name
	err = api.Pin().Add(ctx, p1, opt.Pin.Name("new-name-for-p1"))
	require.NoError(t, err, "failed to re-pin with new name")

	// Verify name was updated
	repinMap, err := listPins(opt.Pin.Ls.Detailed(true))
	require.NoError(t, err, "failed to list pins after re-pin")
	rePinnedName, ok := repinMap[p1.String()]
	require.True(t, ok, "re-pinned content %s not found", p1)
	require.Equal(t, "new-name-for-p1", rePinnedName, "pin name not updated after re-pinning %s", p1)

	// Test 6: Direct pin with name
	p5, err := api.Unixfs().Add(ctx, strFile("direct-content")())
	require.NoError(t, err)
	err = api.Pin().Add(ctx, p5, opt.Pin.Recursive(false), opt.Pin.Name("direct-pin-name"))
	require.NoError(t, err, "failed to add direct pin with name")

	// Verify direct pin has name
	typeOpt, err := opt.Pin.Ls.Type("direct")
	require.NoError(t, err, "failed to create type option")
	directPinMap, err := listPins(typeOpt, opt.Pin.Ls.Detailed(true))
	require.NoError(t, err, "failed to list direct pins")
	directName, ok := directPinMap[p5.String()]
	require.True(t, ok, "direct pin %s not found", p5)
	require.Equal(t, "direct-pin-name", directName, "unexpected name for direct pin %s", p5)

	// Test 7: List without detailed option doesn't return names
	noDetailsMap, err := listPins()
	require.NoError(t, err, "failed to list pins without detailed option")

	// All names should be empty without detailed option
	for path, name := range noDetailsMap {
		require.Empty(t, name, "expected empty name for %s without detailed option, got '%s'", path, name)
	}
}
func assertNotPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path) {
t.Helper()

View File

@ -18,7 +18,6 @@ import (
pathresolver "github.com/ipfs/boxo/path/resolver"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/boxo/pinning/pinner/dspinner"
provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
format "github.com/ipfs/go-ipld-format"
@ -49,16 +48,17 @@ func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blocks
}
// Pinning creates new pinner which tells GC which blocks should be kept
func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov provider.System) (pin.Pinner, error) {
func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo, prov DHTProvider) (pin.Pinner, error) {
// Parse strategy at function creation time (not inside the returned function)
// This happens before the provider is created, which is why we pass the strategy
// string and parse it here, rather than using fx-provided ProvidingStrategy.
strategyFlag := config.ParseReproviderStrategy(strategy)
strategyFlag := config.ParseProvideStrategy(strategy)
return func(bstore blockstore.Blockstore,
ds format.DAGService,
repo repo.Repo,
prov provider.System) (pin.Pinner, error) {
prov DHTProvider,
) (pin.Pinner, error) {
rootDS := repo.Datastore()
syncFn := func(ctx context.Context) error {
@ -72,8 +72,8 @@ func Pinning(strategy string) func(bstore blockstore.Blockstore, ds format.DAGSe
ctx := context.TODO()
var opts []dspinner.Option
roots := (strategyFlag & config.ReproviderStrategyRoots) != 0
pinned := (strategyFlag & config.ReproviderStrategyPinned) != 0
roots := (strategyFlag & config.ProvideStrategyRoots) != 0
pinned := (strategyFlag & config.ProvideStrategyPinned) != 0
// Important: Only one of WithPinnedProvider or WithRootsProvider should be active.
// Having both would cause duplicate root advertisements since "pinned" includes all
@ -179,8 +179,8 @@ func Dag(bs blockservice.BlockService) format.DAGService {
}
// Files loads persisted MFS root
func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov provider.System) (*mfs.Root, error) {
func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) {
dsk := datastore.NewKey("/local/filesroot")
pf := func(ctx context.Context, c cid.Cid) error {
rootDS := repo.Datastore()
@ -230,18 +230,21 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo
return nil, err
}
// MFS (Mutable File System) provider integration:
// Only pass the provider to MFS when the strategy includes "mfs".
// MFS will call Provide() on every DAGService.Add() operation,
// which is sufficient for the "mfs" strategy - it ensures all
// MFS content gets announced as it's added or modified.
// For non-mfs strategies, we set provider to nil to avoid unnecessary providing.
strategyFlag := config.ParseReproviderStrategy(strategy)
if strategyFlag&config.ReproviderStrategyMFS == 0 {
// MFS (Mutable File System) provider integration: Only pass the provider
// to MFS when the strategy includes "mfs". MFS will call StartProviding()
// on every DAGService.Add() operation, which is sufficient for the "mfs"
// strategy - it ensures all MFS content gets announced as it's added or
// modified. For non-mfs strategies, we set provider to nil to avoid
// unnecessary providing.
strategyFlag := config.ParseProvideStrategy(strategy)
if strategyFlag&config.ProvideStrategyMFS == 0 {
prov = nil
}
root, err := mfs.NewRoot(ctx, dag, nd, pf, prov)
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {

View File

@ -254,7 +254,7 @@ func Storage(bcfg *BuildCfg, cfg *config.Config) fx.Option {
cacheOpts,
cfg.Datastore.HashOnRead,
cfg.Datastore.WriteThrough.WithDefault(config.DefaultWriteThrough),
cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy),
cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy),
)),
finalBstore,
)
@ -347,9 +347,9 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
isBitswapServerEnabled := cfg.Bitswap.ServerEnabled.WithDefault(config.DefaultBitswapServerEnabled)
isHTTPRetrievalEnabled := cfg.HTTPRetrieval.Enabled.WithDefault(config.DefaultHTTPRetrievalEnabled)
// Right now Provider and Reprovider systems are tied together - disabling Reprovider by setting interval to 0 disables Provider
// and vice versa: Provider.Enabled=false will disable both Provider of new CIDs and the Reprovider of old ones.
isProviderEnabled := cfg.Provider.Enabled.WithDefault(config.DefaultProviderEnabled) && cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval) != 0
// The Provide system handles both new CID announcements and periodic re-announcements.
// Disabling is controlled by Provide.Enabled=false or setting Interval to 0.
isProviderEnabled := cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled) && cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) != 0
return fx.Options(
fx.Provide(BitswapOptions(cfg)),
@ -365,13 +365,7 @@ func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.Part
fx.Provide(p2p.New),
LibP2P(bcfg, cfg, userResourceOverrides),
OnlineProviders(
isProviderEnabled,
cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy),
cfg.Reprovider.Interval.WithDefault(config.DefaultReproviderInterval),
cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient),
int(cfg.Provider.WorkerCount.WithDefault(config.DefaultProviderWorkerCount)),
),
OnlineProviders(isProviderEnabled, cfg),
)
}
@ -432,6 +426,16 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
cfg.Import.UnixFSHAMTDirectorySizeThreshold = *cfg.Internal.UnixFSShardingSizeThreshold
}
// Validate Import configuration
if err := config.ValidateImportConfig(&cfg.Import); err != nil {
return fx.Error(err)
}
// Validate Provide configuration
if err := config.ValidateProvideConfig(&cfg.Provide); err != nil {
return fx.Error(err)
}
// Auto-sharding settings
shardingThresholdString := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardSingThresholdInt, err := humanize.ParseBytes(shardingThresholdString)
@ -443,7 +447,7 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
uio.HAMTShardingSize = int(shardSingThresholdInt)
uio.DefaultShardWidth = int(shardMaxFanout)
providerStrategy := cfg.Reprovider.Strategy.WithDefault(config.DefaultReproviderStrategy)
providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
return fx.Options(
bcfgOpts,

View File

@ -1,5 +1,4 @@
//go:build linux || darwin
// +build linux darwin
package fd

View File

@ -11,13 +11,28 @@ import (
"github.com/ipfs/boxo/mfs"
pin "github.com/ipfs/boxo/pinning/pinner"
"github.com/ipfs/boxo/pinning/pinner/dspinner"
provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/repo"
irouting "github.com/ipfs/kubo/routing"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/amino"
"github.com/libp2p/go-libp2p-kad-dht/dual"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb"
dhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider"
"github.com/libp2p/go-libp2p-kad-dht/provider/buffered"
ddhtprovider "github.com/libp2p/go-libp2p-kad-dht/provider/dual"
"github.com/libp2p/go-libp2p-kad-dht/provider/keystore"
routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
"github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/routing"
ma "github.com/multiformats/go-multiaddr"
mh "github.com/multiformats/go-multihash"
"go.uber.org/fx"
)
@ -29,154 +44,523 @@ const sampledBatchSize = 1000
// Datastore key used to store previous reprovide strategy.
const reprovideStrategyKey = "/reprovideStrategy"
func ProviderSys(reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
return fx.Provide(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (provider.System, error) {
// Initialize provider.System first, before pinner/blockstore/etc.
// The KeyChanFunc will be set later via SetKeyProvider() once we have
// created the pinner, blockstore and other dependencies.
opts := []provider.Option{
provider.Online(cr),
provider.ReproviderInterval(reprovideInterval),
provider.ProvideWorkerCount(provideWorkerCount),
// DHTProvider is an interface for providing keys to a DHT swarm. It holds a
// state of keys to be advertised, and is responsible for periodically
// publishing provider records for these keys to the DHT swarm before the
// records expire.
type DHTProvider interface {
	// StartProviding ensures keys are periodically advertised to the DHT swarm.
	//
	// If the `keys` aren't currently being reprovided, they are added to the
	// queue to be provided to the DHT swarm as soon as possible, and scheduled
	// to be reprovided periodically. If `force` is set to true, all keys are
	// provided to the DHT swarm, regardless of whether they were already being
	// reprovided in the past. `keys` keep being reprovided until `StopProviding`
	// is called.
	//
	// This operation is asynchronous: it returns as soon as the `keys` are added
	// to the provide queue, and providing happens in the background.
	//
	// Returns an error if the keys couldn't be added to the provide queue. This
	// can happen if the provider is closed or if the node is currently Offline
	// (either never bootstrapped, or disconnected since more than `OfflineDelay`).
	// The schedule and provide queue depend on the network size, hence recent
	// network connectivity is essential.
	StartProviding(force bool, keys ...mh.Multihash) error
	// ProvideOnce sends provider records for the specified keys to the DHT swarm
	// only once. It does not automatically reprovide those keys afterward.
	//
	// Add the supplied multihashes to the provide queue, and return immediately.
	// The provide operation happens asynchronously.
	//
	// Returns an error if the keys couldn't be added to the provide queue. This
	// can happen if the provider is closed or if the node is currently Offline
	// (either never bootstrapped, or disconnected since more than `OfflineDelay`).
	// The schedule and provide queue depend on the network size, hence recent
	// network connectivity is essential.
	ProvideOnce(keys ...mh.Multihash) error
	// Clear removes all keys from the provide queue and returns the number
	// of keys that were cleared.
	//
	// The keys are not deleted from the keystore, so they will continue to be
	// reprovided as scheduled.
	Clear() int
	// RefreshSchedule scans the Keystore for any keys that are not currently
	// scheduled for reproviding. If such keys are found, it schedules their
	// associated keyspace region to be reprovided.
	//
	// This function doesn't remove prefixes that have no keys from the schedule.
	// This is done automatically during the reprovide operation if a region has no
	// keys.
	//
	// Returns an error if the provider is closed or if the node is currently
	// Offline (either never bootstrapped, or disconnected since more than
	// `OfflineDelay`). The schedule depends on the network size, hence recent
	// network connectivity is essential.
	RefreshSchedule() error
}
// Compile-time assertions that every provider implementation satisfies the
// DHTProvider interface.
var (
	_ DHTProvider = &ddhtprovider.SweepingProvider{}
	_ DHTProvider = &dhtprovider.SweepingProvider{}
	_ DHTProvider = &NoopProvider{}
	_ DHTProvider = &LegacyProvider{}
)
// NoopProvider is a DHTProvider that intentionally does nothing. It is used
// when providing is disabled or when no DHT is available: every method
// succeeds immediately without performing any actual operations.
type NoopProvider struct{}

// StartProviding accepts the keys and discards them.
func (r *NoopProvider) StartProviding(_ bool, _ ...mh.Multihash) error {
	return nil
}

// ProvideOnce accepts the keys and discards them.
func (r *NoopProvider) ProvideOnce(_ ...mh.Multihash) error {
	return nil
}

// Clear reports that nothing was queued.
func (r *NoopProvider) Clear() int {
	return 0
}

// RefreshSchedule is a no-op and never fails.
func (r *NoopProvider) RefreshSchedule() error {
	return nil
}
// LegacyProvider is a wrapper around the boxo/provider.System that implements
// the DHTProvider interface. This provider manages reprovides using a burst
// strategy where it sequentially reprovides all keys at once during each
// reprovide interval, rather than spreading the load over time.
//
// This is the legacy provider implementation that can cause resource spikes
// during reprovide operations. For more efficient providing, consider using
// the SweepingProvider which spreads the load over the reprovide interval.
type LegacyProvider struct {
	provider.System
}

// StartProviding implements DHTProvider. The `force` flag is ignored: keys
// are simply handed to ProvideOnce, since the wrapped provider.System
// performs periodic reprovides itself via its own reprovide interval.
func (r *LegacyProvider) StartProviding(force bool, keys ...mh.Multihash) error {
	return r.ProvideOnce(keys...)
}
func (r *LegacyProvider) ProvideOnce(keys ...mh.Multihash) error {
if many, ok := r.System.(routinghelpers.ProvideManyRouter); ok {
return many.ProvideMany(context.Background(), keys)
}
for _, k := range keys {
if err := r.Provide(context.Background(), cid.NewCidV1(cid.Raw, k), true); err != nil {
return err
}
if !acceleratedDHTClient && reprovideInterval > 0 {
// The estimation kinda suck if you are running with accelerated DHT client,
// given this message is just trying to push people to use the acceleratedDHTClient
// let's not report on through if it's in use
opts = append(opts,
provider.ThroughputReport(func(reprovide bool, complete bool, keysProvided uint, duration time.Duration) bool {
avgProvideSpeed := duration / time.Duration(keysProvided)
count := uint64(keysProvided)
}
return nil
}
if !reprovide || !complete {
// We don't know how many CIDs we have to provide, try to fetch it from the blockstore.
// But don't try for too long as this might be very expensive if you have a huge datastore.
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
defer cancel()
// Clear implements DHTProvider by delegating to the wrapped
// provider.System's queue-clearing logic, returning how many entries
// were dropped from the provide queue.
func (r *LegacyProvider) Clear() int { return r.System.Clear() }
// FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup.
// Note: talk to datastore directly, as to not depend on Blockstore here.
qr, err := repo.Datastore().Query(ctx, query.Query{
Prefix: blockstore.BlockPrefix.String(),
KeysOnly: true})
if err != nil {
logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err)
return false
}
defer qr.Close()
count = 0
countLoop:
for {
select {
case _, ok := <-qr.Next():
if !ok {
break countLoop
}
count++
case <-ctx.Done():
// really big blockstore mode
// RefreshSchedule implements DHTProvider. The legacy provider keeps no
// reprovide schedule of its own, so there is nothing to refresh.
func (r *LegacyProvider) RefreshSchedule() error {
	return nil
}
// how many blocks would be in a 10TiB blockstore with 128KiB blocks.
const probableBigBlockstore = (10 * 1024 * 1024 * 1024 * 1024) / (128 * 1024)
// How long per block that lasts us.
expectedProvideSpeed := reprovideInterval / probableBigBlockstore
if avgProvideSpeed > expectedProvideSpeed {
logger.Errorf(`
🔔🔔🔔 YOU MAY BE FALLING BEHIND DHT REPROVIDES! 🔔🔔🔔
// LegacyProviderOpt creates a LegacyProvider to be used as provider in the
// IpfsNode
func LegacyProviderOpt(reprovideInterval time.Duration, strategy string, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
system := fx.Provide(
fx.Annotate(func(lc fx.Lifecycle, cr irouting.ProvideManyRouter, repo repo.Repo) (*LegacyProvider, error) {
// Initialize provider.System first, before pinner/blockstore/etc.
// The KeyChanFunc will be set later via SetKeyProvider() once we have
// created the pinner, blockstore and other dependencies.
opts := []provider.Option{
provider.Online(cr),
provider.ReproviderInterval(reprovideInterval),
provider.ProvideWorkerCount(provideWorkerCount),
}
if !acceleratedDHTClient && reprovideInterval > 0 {
// The estimation kinda suck if you are running with accelerated DHT client,
// given this message is just trying to push people to use the acceleratedDHTClient
// let's not report on through if it's in use
opts = append(opts,
provider.ThroughputReport(func(reprovide bool, complete bool, keysProvided uint, duration time.Duration) bool {
avgProvideSpeed := duration / time.Duration(keysProvided)
count := uint64(keysProvided)
Your system might be struggling to keep up with DHT reprovides!
This means your content could be partially or completely inaccessible on the network.
We observed that you recently provided %d keys at an average rate of %v per key.
if !reprovide || !complete {
// We don't know how many CIDs we have to provide, try to fetch it from the blockstore.
// But don't try for too long as this might be very expensive if you have a huge datastore.
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
defer cancel()
🕑 An attempt to estimate your blockstore size timed out after 5 minutes,
implying your blockstore might be exceedingly large. Assuming a considerable
size of 10TiB, it would take %v to provide the complete set.
// FIXME: I want a running counter of blocks so size of blockstore can be an O(1) lookup.
// Note: talk to datastore directly, as to not depend on Blockstore here.
qr, err := repo.Datastore().Query(ctx, query.Query{
Prefix: blockstore.BlockPrefix.String(),
KeysOnly: true,
})
if err != nil {
logger.Errorf("fetching AllKeysChain in provider ThroughputReport: %v", err)
return false
}
defer qr.Close()
count = 0
countLoop:
for {
select {
case _, ok := <-qr.Next():
if !ok {
break countLoop
}
count++
case <-ctx.Done():
// really big blockstore mode
The total provide time needs to stay under your reprovide interval (%v) to prevent falling behind!
// how many blocks would be in a 10TiB blockstore with 128KiB blocks.
const probableBigBlockstore = (10 * 1024 * 1024 * 1024 * 1024) / (128 * 1024)
// How long per block that lasts us.
expectedProvideSpeed := reprovideInterval / probableBigBlockstore
if avgProvideSpeed > expectedProvideSpeed {
logger.Errorf(`
🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔
💡 Consider enabling the Accelerated DHT to enhance your system performance. See:
https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient`,
keysProvided, avgProvideSpeed, avgProvideSpeed*probableBigBlockstore, reprovideInterval)
return false
Your node may be falling behind on DHT reprovides, which could affect content availability.
Observed: %d keys at %v per key
Estimated: Assuming 10TiB blockstore, would take %v to complete
Must finish within %v (Provide.DHT.Interval)
Solutions (try in order):
1. Enable Provide.DHT.SweepEnabled=true (recommended)
2. Increase Provide.DHT.MaxWorkers if needed
3. Enable Routing.AcceleratedDHTClient=true (last resort, resource intensive)
Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide`,
keysProvided, avgProvideSpeed, avgProvideSpeed*probableBigBlockstore, reprovideInterval)
return false
}
}
}
}
}
// How long per block that lasts us.
expectedProvideSpeed := reprovideInterval
if count > 0 {
expectedProvideSpeed = reprovideInterval / time.Duration(count)
}
// How long per block that lasts us.
expectedProvideSpeed := reprovideInterval
if count > 0 {
expectedProvideSpeed = reprovideInterval / time.Duration(count)
}
if avgProvideSpeed > expectedProvideSpeed {
logger.Errorf(`
🔔🔔🔔 YOU ARE FALLING BEHIND DHT REPROVIDES! 🔔🔔🔔
if avgProvideSpeed > expectedProvideSpeed {
logger.Errorf(`
🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔
Your system is struggling to keep up with DHT reprovides!
This means your content could be partially or completely inaccessible on the network.
We observed that you recently provided %d keys at an average rate of %v per key.
Your node is falling behind on DHT reprovides, which will affect content availability.
💾 Your total CID count is ~%d which would total at %v reprovide process.
Observed: %d keys at %v per key
Confirmed: ~%d total CIDs requiring %v to complete
Must finish within %v (Provide.DHT.Interval)
The total provide time needs to stay under your reprovide interval (%v) to prevent falling behind!
Solutions (try in order):
1. Enable Provide.DHT.SweepEnabled=true (recommended)
2. Increase Provide.DHT.MaxWorkers if needed
3. Enable Routing.AcceleratedDHTClient=true (last resort, resource intensive)
💡 Consider enabling the Accelerated DHT to enhance your reprovide throughput. See:
https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient`,
keysProvided, avgProvideSpeed, count, avgProvideSpeed*time.Duration(count), reprovideInterval)
}
return false
}, sampledBatchSize))
Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide`,
keysProvided, avgProvideSpeed, count, avgProvideSpeed*time.Duration(count), reprovideInterval)
}
return false
}, sampledBatchSize))
}
sys, err := provider.New(repo.Datastore(), opts...)
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
return sys.Close()
},
})
prov := &LegacyProvider{sys}
handleStrategyChange(strategy, prov, repo.Datastore())
return prov, nil
},
fx.As(new(provider.System)),
fx.As(new(DHTProvider)),
),
)
setKeyProvider := fx.Invoke(func(lc fx.Lifecycle, system provider.System, keyProvider provider.KeyChanFunc) {
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
// SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner.
// We cannot create the blockstore without the provider (it needs to provide blocks),
// and we cannot determine the reproviding strategy without the pinner/blockstore.
// This deferred initialization allows us to create provider.System first,
// then set the actual key provider function after all dependencies are ready.
system.SetKeyProvider(keyProvider)
return nil
},
})
})
return fx.Options(
system,
setKeyProvider,
)
}
// dhtImpl is the minimal DHT surface the sweeping provider needs beyond
// plain routing: closest-peer lookups, access to the libp2p host (for peer
// ID and addresses), and the raw message sender used to push provide
// records directly to remote peers.
type dhtImpl interface {
	routing.Routing
	GetClosestPeers(context.Context, string) ([]peer.ID, error)
	Host() host.Host
	MessageSender() dht_pb.MessageSender
}
// addrsFilter is an optional interface for DHT implementations that expose
// a filtered set of self addresses. When available it is preferred over the
// host's full address list when advertising provider records.
type addrsFilter interface {
	FilteredAddrs() []ma.Multiaddr
}
// SweepingProviderOpt wires up the sweeping DHT provider: a keystore-backed,
// buffered provider that (re)provides records on a sweep schedule, plus a
// lifecycle hook that keeps the keystore in sync with the configured provide
// strategy. Returns fx options; providing degrades to a no-op when no
// compatible DHT implementation is available.
func SweepingProviderOpt(cfg *config.Config) fx.Option {
	reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)

	type providerInput struct {
		fx.In
		DHT  routing.Routing `name:"dhtc"`
		Repo repo.Repo
	}

	sweepingReprovider := fx.Provide(func(in providerInput) (DHTProvider, *keystore.ResettableKeystore, error) {
		ds := in.Repo.Datastore()
		ks, err := keystore.NewResettableKeystore(ds,
			keystore.WithPrefixBits(16),
			keystore.WithDatastorePath("/provider/keystore"),
			keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))),
		)
		if err != nil {
			return nil, nil, err
		}

		// Constants for buffered provider configuration.
		// These values match the upstream defaults from go-libp2p-kad-dht and have been battle-tested.
		const (
			// bufferedDsName is the datastore namespace used by the buffered provider.
			// The dsqueue persists operations here to handle large data additions without
			// being memory-bound, allowing operations on hardware with limited RAM and
			// enabling core operations to return instantly while processing happens async.
			bufferedDsName = "bprov"

			// bufferedBatchSize controls how many operations are dequeued and processed
			// together from the datastore queue. The worker processes up to this many
			// operations at once, grouping them by type for efficiency.
			bufferedBatchSize = 1 << 10 // 1024 items

			// bufferedIdleWriteTime is an implementation detail of go-dsqueue that controls
			// how long the datastore buffer waits for new multihashes to arrive before
			// flushing in-memory items to the datastore. This does NOT affect providing speed -
			// provides happen as fast as possible via a dedicated worker that continuously
			// processes the queue regardless of this timing.
			bufferedIdleWriteTime = time.Minute
		)
		bufferedProviderOpts := []buffered.Option{
			buffered.WithBatchSize(bufferedBatchSize),
			buffered.WithDsName(bufferedDsName),
			buffered.WithIdleWriteTime(bufferedIdleWriteTime),
		}

		var impl dhtImpl
		switch inDht := in.DHT.(type) {
		case *dht.IpfsDHT:
			if inDht != nil {
				impl = inDht
			}
		case *dual.DHT:
			if inDht != nil {
				// The dual DHT gets its own provider constructor covering both
				// of its routing tables; it short-circuits here.
				prov, err := ddhtprovider.New(inDht,
					ddhtprovider.WithKeystore(ks),
					ddhtprovider.WithReprovideInterval(reprovideInterval),
					ddhtprovider.WithMaxReprovideDelay(time.Hour),
					ddhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)),
					ddhtprovider.WithConnectivityCheckOnlineInterval(1*time.Minute),
					ddhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))),
					ddhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
					ddhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
					ddhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
				)
				if err != nil {
					return nil, nil, err
				}
				return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil
			}
		case *fullrt.FullRT:
			if inDht != nil {
				impl = inDht
			}
		}
		if impl == nil {
			// No compatible DHT implementation: providing becomes a no-op.
			return &NoopProvider{}, nil, nil
		}

		// Prefer filtered self addresses when the implementation exposes
		// them; otherwise advertise all host addresses.
		var selfAddrsFunc func() []ma.Multiaddr
		if implFilter, ok := impl.(addrsFilter); ok {
			selfAddrsFunc = implFilter.FilteredAddrs
		} else {
			selfAddrsFunc = func() []ma.Multiaddr { return impl.Host().Addrs() }
		}

		opts := []dhtprovider.Option{
			dhtprovider.WithKeystore(ks),
			dhtprovider.WithPeerID(impl.Host().ID()),
			dhtprovider.WithRouter(impl),
			dhtprovider.WithMessageSender(impl.MessageSender()),
			dhtprovider.WithSelfAddrs(selfAddrsFunc),
			dhtprovider.WithAddLocalRecord(func(h mh.Multihash) error {
				return impl.Provide(context.Background(), cid.NewCidV1(cid.Raw, h), false)
			}),
			dhtprovider.WithReplicationFactor(amino.DefaultBucketSize),
			dhtprovider.WithReprovideInterval(reprovideInterval),
			dhtprovider.WithMaxReprovideDelay(time.Hour),
			dhtprovider.WithOfflineDelay(cfg.Provide.DHT.OfflineDelay.WithDefault(config.DefaultProvideDHTOfflineDelay)),
			dhtprovider.WithConnectivityCheckOnlineInterval(1 * time.Minute),
			dhtprovider.WithMaxWorkers(int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))),
			dhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
			dhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
			dhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
		}
		prov, err := dhtprovider.New(opts...)
		if err != nil {
			return nil, nil, err
		}
		return buffered.New(prov, ds, bufferedProviderOpts...), ks, nil
	})

	type keystoreInput struct {
		fx.In
		Provider    DHTProvider
		Keystore    *keystore.ResettableKeystore
		KeyProvider provider.KeyChanFunc
	}

	initKeystore := fx.Invoke(func(lc fx.Lifecycle, in keystoreInput) {
		// Skip keystore initialization for NoopProvider
		if _, ok := in.Provider.(*NoopProvider); ok {
			return
		}
		var (
			cancel context.CancelFunc
			done   = make(chan struct{})
		)
		// syncKeystore replaces the keystore contents with the CIDs currently
		// matching the provide strategy, then refreshes the provide schedule.
		syncKeystore := func(ctx context.Context) error {
			kcf, err := in.KeyProvider(ctx)
			if err != nil {
				return err
			}
			if err := in.Keystore.ResetCids(ctx, kcf); err != nil {
				return err
			}
			if err := in.Provider.RefreshSchedule(); err != nil {
				logger.Infow("refreshing provider schedule", "err", err)
			}
			return nil
		}
		lc.Append(fx.Hook{
			OnStart: func(ctx context.Context) error {
				// Set the KeyProvider as a garbage collection function for the
				// keystore. Periodically purge the Keystore from all its keys and
				// replace them with the keys that needs to be reprovided, coming from
				// the KeyChanFunc. So far, this is the less worse way to remove CIDs
				// that shouldn't be reprovided from the provider's state.
				go func() {
					// Sync the keystore once at startup. This operation is async since
					// we need to walk the DAG of objects matching the provide strategy,
					// which can take a while.
					// NOTE(review): this goroutine captures the OnStart ctx, which
					// fx may cancel once startup completes — confirm intended.
					strategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
					logger.Infow("provider keystore sync started", "strategy", strategy)
					if err := syncKeystore(ctx); err != nil {
						logger.Errorw("provider keystore sync failed", "err", err, "strategy", strategy)
					} else {
						logger.Infow("provider keystore sync completed", "strategy", strategy)
					}
				}()
				gcCtx, c := context.WithCancel(context.Background())
				cancel = c
				go func() { // garbage collection loop for cids to reprovide
					defer close(done)
					ticker := time.NewTicker(reprovideInterval)
					defer ticker.Stop()
					for {
						select {
						case <-gcCtx.Done():
							return
						case <-ticker.C:
							if err := syncKeystore(gcCtx); err != nil {
								logger.Errorw("provider keystore sync", "err", err)
							}
						}
					}
				}()
				return nil
			},
			OnStop: func(ctx context.Context) error {
				if cancel != nil {
					cancel()
				}
				// Wait for the GC loop to exit before closing the keystore.
				select {
				case <-done:
				case <-ctx.Done():
					return ctx.Err()
				}
				// Keystore data isn't purged, on close, but it will be overwritten
				// when the node starts again.
				return in.Keystore.Close()
			},
		})
	})

	return fx.Options(
		sweepingReprovider,
		initKeystore,
	)
}
// ONLINE/OFFLINE
// OnlineProviders groups units managing provider routing records online
func OnlineProviders(provide bool, providerStrategy string, reprovideInterval time.Duration, acceleratedDHTClient bool, provideWorkerCount int) fx.Option {
// OnlineProviders groups units managing provide routing records online
func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
if !provide {
return OfflineProviders()
}
strategyFlag := config.ParseReproviderStrategy(providerStrategy)
providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
strategyFlag := config.ParseProvideStrategy(providerStrategy)
if strategyFlag == 0 {
return fx.Error(fmt.Errorf("unknown reprovider strategy %q", providerStrategy))
return fx.Error(fmt.Errorf("provider: unknown strategy %q", providerStrategy))
}
return fx.Options(
opts := []fx.Option{
fx.Provide(setReproviderKeyProvider(providerStrategy)),
ProviderSys(reprovideInterval, acceleratedDHTClient, provideWorkerCount),
)
}
if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) {
opts = append(opts, SweepingProviderOpt(cfg))
} else {
reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)
acceleratedDHTClient := cfg.Routing.AcceleratedDHTClient.WithDefault(config.DefaultAcceleratedDHTClient)
provideWorkerCount := int(cfg.Provide.DHT.MaxWorkers.WithDefault(config.DefaultProvideDHTMaxWorkers))
opts = append(opts, LegacyProviderOpt(reprovideInterval, providerStrategy, acceleratedDHTClient, provideWorkerCount))
}
return fx.Options(opts...)
}
// OfflineProviders groups units managing provide routing records offline.
// It supplies a no-op DHTProvider so dependents still resolve when the node
// does not provide records.
func OfflineProviders() fx.Option {
	return fx.Provide(func() DHTProvider {
		return &NoopProvider{}
	})
}
func mfsProvider(mfsRoot *mfs.Root, fetcher fetcher.Factory) provider.KeyChanFunc {
return func(ctx context.Context) (<-chan cid.Cid, error) {
err := mfsRoot.FlushMemFree(ctx)
if err != nil {
return nil, fmt.Errorf("error flushing mfs, cannot provide MFS: %w", err)
return nil, fmt.Errorf("provider: error flushing MFS, cannot provide MFS: %w", err)
}
rootNode, err := mfsRoot.GetDirectory().GetNode()
if err != nil {
return nil, fmt.Errorf("error loading mfs root, cannot provide MFS: %w", err)
return nil, fmt.Errorf("provider: error loading MFS root, cannot provide MFS: %w", err)
}
kcf := provider.NewDAGProvider(rootNode.Cid(), fetcher)
@ -191,13 +575,12 @@ type provStrategyIn struct {
OfflineIPLDFetcher fetcher.Factory `name:"offlineIpldFetcher"`
OfflineUnixFSFetcher fetcher.Factory `name:"offlineUnixfsFetcher"`
MFSRoot *mfs.Root
Provider provider.System
Repo repo.Repo
}
type provStrategyOut struct {
fx.Out
ProvidingStrategy config.ReproviderStrategy
ProvidingStrategy config.ProvideStrategy
ProvidingKeyChanFunc provider.KeyChanFunc
}
@ -207,18 +590,18 @@ type provStrategyOut struct {
// - "pinned": All pinned content (roots + children)
// - "mfs": Only MFS content
// - "all": all blocks
func createKeyProvider(strategyFlag config.ReproviderStrategy, in provStrategyIn) provider.KeyChanFunc {
func createKeyProvider(strategyFlag config.ProvideStrategy, in provStrategyIn) provider.KeyChanFunc {
switch strategyFlag {
case config.ReproviderStrategyRoots:
case config.ProvideStrategyRoots:
return provider.NewBufferedProvider(dspinner.NewPinnedProvider(true, in.Pinner, in.OfflineIPLDFetcher))
case config.ReproviderStrategyPinned:
case config.ProvideStrategyPinned:
return provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher))
case config.ReproviderStrategyPinned | config.ReproviderStrategyMFS:
case config.ProvideStrategyPinned | config.ProvideStrategyMFS:
return provider.NewPrioritizedProvider(
provider.NewBufferedProvider(dspinner.NewPinnedProvider(false, in.Pinner, in.OfflineIPLDFetcher)),
mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher),
)
case config.ReproviderStrategyMFS:
case config.ProvideStrategyMFS:
return mfsProvider(in.MFSRoot, in.OfflineUnixFSFetcher)
default: // "all", "", "flat" (compat)
return in.Blockstore.AllKeysChan
@ -257,7 +640,7 @@ func persistStrategy(ctx context.Context, strategy string, ds datastore.Datastor
// Strategy change detection: when the reproviding strategy changes,
// we clear the provide queue to avoid unexpected behavior from mixing
// strategies. This ensures a clean transition between different providing modes.
func handleStrategyChange(strategy string, provider provider.System, ds datastore.Datastore) {
func handleStrategyChange(strategy string, provider DHTProvider, ds datastore.Datastore) {
ctx := context.Background()
previous, changed, err := detectStrategyChange(ctx, strategy, ds)
@ -270,7 +653,7 @@ func handleStrategyChange(strategy string, provider provider.System, ds datastor
return
}
logger.Infow("Reprovider.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
logger.Infow("Provide.Strategy changed, clearing provide queue", "previous", previous, "current", strategy)
provider.Clear()
if err := persistStrategy(ctx, strategy, ds); err != nil {
@ -279,22 +662,11 @@ func handleStrategyChange(strategy string, provider provider.System, ds datastor
}
func setReproviderKeyProvider(strategy string) func(in provStrategyIn) provStrategyOut {
strategyFlag := config.ParseReproviderStrategy(strategy)
strategyFlag := config.ParseProvideStrategy(strategy)
return func(in provStrategyIn) provStrategyOut {
// Create the appropriate key provider based on strategy
kcf := createKeyProvider(strategyFlag, in)
// SetKeyProvider breaks the circular dependency between provider, blockstore, and pinner.
// We cannot create the blockstore without the provider (it needs to provide blocks),
// and we cannot determine the reproviding strategy without the pinner/blockstore.
// This deferred initialization allows us to create provider.System first,
// then set the actual key provider function after all dependencies are ready.
in.Provider.SetKeyProvider(kcf)
// Handle strategy changes (detection, queue clearing, persistence)
handleStrategyChange(strategy, in.Provider, in.Repo.Datastore())
return provStrategyOut{
ProvidingStrategy: strategyFlag,
ProvidingKeyChanFunc: kcf,

View File

@ -2,7 +2,6 @@ package node
import (
blockstore "github.com/ipfs/boxo/blockstore"
provider "github.com/ipfs/boxo/provider"
"github.com/ipfs/go-datastore"
config "github.com/ipfs/kubo/config"
"go.uber.org/fx"
@ -33,9 +32,8 @@ func BaseBlockstoreCtor(
hashOnRead bool,
writeThrough bool,
providingStrategy string,
) func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) {
return func(mctx helpers.MetricsCtx, repo repo.Repo, prov provider.System, lc fx.Lifecycle) (bs BaseBlocks, err error) {
) func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
return func(mctx helpers.MetricsCtx, repo repo.Repo, prov DHTProvider, lc fx.Lifecycle) (bs BaseBlocks, err error) {
opts := []blockstore.Option{blockstore.WriteThrough(writeThrough)}
// Blockstore providing integration:
@ -43,8 +41,8 @@ func BaseBlockstoreCtor(
// Important: Provide calls from blockstore are intentionally BLOCKING.
// The Provider implementation (not the blockstore) should handle concurrency/queuing.
// This avoids spawning unbounded goroutines for concurrent block additions.
strategyFlag := config.ParseReproviderStrategy(providingStrategy)
if strategyFlag&config.ReproviderStrategyAll != 0 {
strategyFlag := config.ParseProvideStrategy(providingStrategy)
if strategyFlag&config.ProvideStrategyAll != 0 {
opts = append(opts, blockstore.Provider(prov))
}
@ -79,11 +77,11 @@ func GcBlockstoreCtor(bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockst
}
// FilestoreBlockstoreCtor wraps GcBlockstore and adds Filestore support
func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
func FilestoreBlockstoreCtor(repo repo.Repo, bb BaseBlocks, prov DHTProvider) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
gclocker = blockstore.NewGCLocker()
// hash security
fstore = filestore.NewFilestore(bb, repo.FileManager())
fstore = filestore.NewFilestore(bb, repo.FileManager(), prov)
gcbs = blockstore.NewGCBlockstore(fstore, gclocker)
gcbs = &verifbs.VerifBSGC{GCBlockstore: gcbs}

View File

@ -1,5 +1,4 @@
//go:build testrunmain
// +build testrunmain
package main

View File

@ -1,136 +1,113 @@
<!-- Last updated during [v0.36.0 release](https://github.com/ipfs/kubo/issues/10816) -->
<!-- Last updated during [v0.37.0 release](https://github.com/ipfs/kubo/issues/10867) -->
# ✅ Release Checklist (vX.Y.Z[-rcN])
## Labels
**Release types:** RC (Release Candidate) | FINAL | PATCH
If an item should be executed only for a specific release type, it is labeled with:
## Prerequisites
- ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) execute **ONLY** when releasing a Release Candidate
- ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) execute **ONLY** when releasing a Final Release
- ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) do **NOT** execute when releasing a Patch Release
- [ ] [GPG signature](https://docs.github.com/en/authentication/managing-commit-signature-verification) configured in local git and GitHub
- [ ] [Docker](https://docs.docker.com/get-docker/) installed on your system
- [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system
- [ ] kubo repository cloned locally
- [ ] **non-PATCH:** Upgrade Go in CI to latest patch from <https://go.dev/dl/>
Otherwise, it means a step should be executed for **ALL** release types.
## 1. Prepare Release Branch
## Before the release
- [ ] Fetch latest changes: `git fetch origin master release`
- [ ] Create branch `release-vX.Y.Z` (base from: `master` if Z=0 for new minor/major, `release` if Z>0 for patch)
- [ ] **RC1 only:** Switch to `master` branch and prepare for next release cycle:
- [ ] Update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y+1.0-dev` (⚠️ double-check Y+1 is correct) ([example PR](https://github.com/ipfs/kubo/pull/9305))
- [ ] Create `./docs/changelogs/vX.Y+1.md` and add link in [CHANGELOG.md](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md)
- [ ] Switch to `release-vX.Y.Z` branch and update [version.go](https://github.com/ipfs/kubo/blob/master/version.go) to `vX.Y.Z(-rcN)` (⚠️ double-check Y matches release) ([example](https://github.com/ipfs/kubo/pull/9394))
- [ ] Create draft PR: `release-vX.Y.Z``release` ([example](https://github.com/ipfs/kubo/pull/9306))
- [ ] In `release-vX.Y.Z` branch, cherry-pick commits from `master`: `git cherry-pick -x <commit>` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf))
- ⚠️ **NOTE:** `-x` flag records original commit SHA for traceability and ensures cleaner merges with deduplicated commits in history
- [ ] Verify all CI checks on the PR are passing
- [ ] **FINAL only:** In `release-vX.Y.Z` branch, replace `Changelog` and `Contributors` sections with `./bin/mkreleaselog` stdout (do **NOT** copy stderr)
- [ ] **FINAL only:** Merge PR (`release-vX.Y.Z` → `release`) using `Create a merge commit`
- ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
- ⚠️ do **NOT** delete the `release-vX.Y.Z` branch (needed for future patch releases and git history)
This section covers tasks to be done ahead of the release.
## 2. Tag & Publish
- [ ] Verify you have access to all the services and tools required for the release
- [ ] [GPG signature](https://docs.github.com/en/authentication/managing-commit-signature-verification) configured in local git and in GitHub
- [ ] [docker](https://docs.docker.com/get-docker/) installed on your system
- [ ] [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed on your system
- [ ] [kubo](https://github.com/ipfs/kubo) checked out under `$(go env GOPATH)/src/github.com/ipfs/kubo`
- you can also symlink your clone to the expected location by running `mkdir -p $(go env GOPATH)/src/github.com/ipfs && ln -s $(pwd) $(go env GOPATH)/src/github.com/ipfs/kubo`
- ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) Upgrade Go used in CI to the latest patch release available at <https://go.dev/dl/>
### Create Tag
⚠️ **POINT OF NO RETURN:** Once pushed, tags trigger automatic Docker/NPM publishing that cannot be reversed!
If you're making a release for the first time, do pair programming and have the release reviewer verify all commands.
## The release
- [ ] **RC:** From `release-vX.Y.Z` branch: `git tag -s vX.Y.Z-rcN -m 'Prerelease X.Y.Z-rcN'`
- [ ] **FINAL:** After PR merge, from `release` branch: `git tag -s vX.Y.Z -m 'Release X.Y.Z'`
- [ ] ⚠️ Verify tag is signed and correct: `git show vX.Y.Z(-rcN)`
- [ ] Push tag: `git push origin vX.Y.Z(-rcN)`
- ⚠️ do **NOT** use `git push --tags` because it pushes all your local tags
- [ ] **STOP:** Wait for [Docker build](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) to complete before proceeding
This section covers tasks to be done during each release.
### Publish Artifacts
### 1. Prepare release branch
- [ ] **Docker:** Publish to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags)
- [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow triggered by tag push
- [ ] Verify image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags)
- [ ] **dist.ipfs.tech:** Publish to [dist.ipfs.tech](https://dist.ipfs.tech)
- [ ] Check out [ipfs/distributions](https://github.com/ipfs/distributions)
- [ ] Create branch: `git checkout -b release-kubo-X.Y.Z(-rcN)`
- [ ] Verify `.tool-versions` golang matches [Kubo's CI](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) `go-version:` (update if needed)
- [ ] Run: `./dist.sh add-version kubo vX.Y.Z(-rcN)` ([usage](https://github.com/ipfs/distributions#usage))
- [ ] Create and merge PR (updates `dists/kubo/versions`, **FINAL** also updates `dists/kubo/current` - [example](https://github.com/ipfs/distributions/pull/1125))
- [ ] Wait for [CI workflow](https://github.com/ipfs/distributions/actions/workflows/main.yml) triggered by merge
- [ ] Verify release on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo)
- [ ] **NPM:** Publish to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
- [ ] Manually dispatch [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if not auto-triggered
- [ ] Verify release on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
- [ ] **GitHub Release:** Publish to [GitHub](https://github.com/ipfs/kubo/releases)
- [ ] [Create release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) ([RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1), [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0))
- [ ] Use tag `vX.Y.Z(-rcN)`
- [ ] Link to release issue
- [ ] **RC:** Link to changelog, check `This is a pre-release`
- [ ] **FINAL:** Copy changelog content (without header), do **NOT** check pre-release
- [ ] Run [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow
- [ ] Verify assets are attached to the GitHub release
- [ ] Prepare the release branch and update version numbers accordingly
- [ ] create a new branch `release-vX.Y.Z`
- use `master` as base if `Z == 0`
- use `release` as base if `Z > 0`
- [ ] ![](https://img.shields.io/badge/only-RC1-blue?style=flat-square) update the `CurrentVersionNumber` in [version.go](version.go) in the `master` branch to `vX.Y+1.0-dev` ([example](https://github.com/ipfs/kubo/pull/9305))
- [ ] update the `CurrentVersionNumber` in [version.go](version.go) in the `release-vX.Y.Z` branch to `vX.Y.Z(-rcN)` ([example](https://github.com/ipfs/kubo/pull/9394))
- [ ] create a draft PR from `release-vX.Y.Z` to `release` ([example](https://github.com/ipfs/kubo/pull/9306))
- [ ] Cherry-pick commits from `master` to the `release-vX.Y.Z` using `git cherry-pick -x <commit>` ([example](https://github.com/ipfs/kubo/pull/10636/commits/033de22e3bc6191dbb024ad6472f5b96b34e3ccf))
- **NOTE:** cherry-picking with `-x` is important because it records the original commit SHA in the message, preserving traceability between `master` and the release branch
- [ ] verify all CI checks on the PR from `release-vX.Y.Z` to `release` are passing
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Replace the `Changelog` and `Contributors` sections of the [changelog](docs/changelogs/vX.Y.md) with the stdout (do **NOT** copy the stderr) of `./bin/mkreleaselog`.
- **NOTE:** `mkreleaselog` expects your `$GOPATH/src/github.com/ipfs/kubo` to include latest commits from `release-vX.Y.Z`
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Merge the PR from `release-vX.Y.Z` to `release` using the `Create a merge commit`
- do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
- do **NOT** delete the `release-vX.Y.Z` branch
## 3. Post-Release
### 2. Tag release
### Technical Tasks
- [ ] Create the release tag
- ⚠️ **NOTE:** This is a dangerous operation! Go and Docker publishing are difficult to reverse! Have the release reviewer verify all the commands marked with ⚠️
- [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) tag the HEAD commit using `git tag -s vX.Y.Z(-rcN) -m 'Prerelease X.Y.Z(-rcN)'`
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) tag the HEAD commit of the `release` branch using `git tag -s vX.Y.Z -m 'Release X.Y.Z'`
- [ ] ⚠️ verify the tag is signed and tied to the correct commit using `git show vX.Y.Z(-rcN)`
- [ ] push the tag to GitHub using `git push origin vX.Y.Z(-rcN)`
- ⚠️ do **NOT** use `git push --tags` because it pushes all your local tags
### 3. Publish
- [ ] Publish Docker image to [DockerHub](https://hub.docker.com/r/ipfs/kubo/tags)
- [ ] Wait for [Publish docker image](https://github.com/ipfs/kubo/actions/workflows/docker-image.yml) workflow run initiated by the tag push to finish
- [ ] verify the image is available on [Docker Hub → tags](https://hub.docker.com/r/ipfs/kubo/tags)
- [ ] Publish the release to [dist.ipfs.tech](https://dist.ipfs.tech)
- [ ] check out [ipfs/distributions](https://github.com/ipfs/distributions)
- [ ] create new branch: run `git checkout -b release-kubo-X.Y.Z(-rcN)`
- [ ] Verify [ipfs/distributions](https://github.com/ipfs/distributions)'s `.tool-versions`'s `golang` entry is set to the [latest go release](https://go.dev/doc/devel/release) on the major go branch [Kubo is being tested on](https://github.com/ipfs/kubo/blob/master/.github/workflows/gotest.yml) (see `go-version:`). If not, update `.tool-versions` to match the latest golang.
- [ ] run `./dist.sh add-version kubo vX.Y.Z(-rcN)` to add the new version to the `versions` file ([usage](https://github.com/ipfs/distributions#usage))
- [ ] create and merge the PR which updates `dists/kubo/versions` (**NOTE:** ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) will also have `dists/kubo/current` [example](https://github.com/ipfs/distributions/pull/1125))
- [ ] wait for the [CI](https://github.com/ipfs/distributions/actions/workflows/main.yml) workflow run initiated by the merge to master to finish
- [ ] verify the release is available on [dist.ipfs.tech](https://dist.ipfs.tech/#kubo)
- [ ] Publish the release to [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
- [ ] manually dispatch the [Release to npm](https://github.com/ipfs/npm-kubo/actions/workflows/main.yml) workflow if it was not executed already and verify it discovered the new release
- [ ] verify the release is available on [NPM](https://www.npmjs.com/package/kubo?activeTab=versions)
- [ ] Publish the release to [GitHub kubo/releases](https://github.com/ipfs/kubo/releases)
- [ ] [create](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) a new release
- [RC example](https://github.com/ipfs/kubo/releases/tag/v0.36.0-rc1)
- [FINAL example](https://github.com/ipfs/kubo/releases/tag/v0.35.0)
- [ ] use the `vX.Y.Z(-rcN)` tag
- [ ] link to the release issue
- [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) link to the changelog in the description
- [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) check the `This is a pre-release` checkbox
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) copy the changelog (without the header) in the description
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) do **NOT** check the `This is a pre-release` checkbox
- [ ] run the [sync-release-assets](https://github.com/ipfs/kubo/actions/workflows/sync-release-assets.yml) workflow and verify the release assets are attached to the GitHub release
### 4. After Publishing
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Merge the [release](https://github.com/ipfs/kubo/tree/release) branch back into [master](https://github.com/ipfs/kubo/tree/master)
- [ ] Create a new branch `merge-release-vX.Y.Z` from `release`
- [ ] Create the next [`./docs/changelogs/vA.B.md`](https://github.com/ipfs/kubo/blob/master/docs/changelogs/) and link to the new changelog from the [`./CHANGELOG.md`](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md) file
- [ ] Create and merge a PR from `merge-release-vX.Y.Z` to `master`
- ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we need to be able to sign the merge commit
- ⚠️ **NOTE:** make sure to ignore the changes to [version.go](version.go) (keep the `-dev` in `master`)
- [ ] **FINAL only:** Merge `release` → `master`
- [ ] Create branch `merge-release-vX.Y.Z` from `release`
- [ ] Merge `master` to `merge-release-vX.Y.Z` first, and resolve conflict in `version.go`
- ⚠️ **NOTE:** make sure to ignore the changes to [version.go](https://github.com/ipfs/kubo/blob/master/version.go) (keep the `-dev` in `master`)
- [ ] Create and merge PR from `merge-release-vX.Y.Z` to `master` using `Create a merge commit`
- ⚠️ do **NOT** use `Squash and merge` nor `Rebase and merge` because we want to preserve original commit history
- [ ] Update [ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra)
- [ ] Update Kubo staging environment, see the [Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8) for details.
- [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) Test last release against the current RC
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Test last release against the current one
- [ ] Update collab cluster boxes to the tagged release (final or RC)
- [ ] Update libp2p bootstrappers to the tagged release (final or RC)
- [ ] Promote the release
- [ ] create an [IPFS Discourse](https://discuss.ipfs.tech) topic ([prerelease example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [release example](https://discuss.ipfs.tech/t/kubo-v0-16-0-release-is-out/15249))
- [ ] use `Kubo vX.Y.Z(-rcN) is out!` as the title and `kubo` as tags
- [ ] repeat the title as a heading (`##`) in the description
- [ ] link to the GitHub Release, binaries on IPNS, docker pull command and release notes in the description
- [ ] pin the [IPFS Discourse](https://discuss.ipfs.tech) topic globally, you can make the topic a banner if there is no banner already
- [ ] verify the [IPFS Discourse](https://discuss.ipfs.tech) topic was copied to:
- [ ] [#ipfs-chatter](https://discord.com/channels/669268347736686612/669268347736686615) in IPFS Discord
- [ ] [#ipfs-chatter](https://filecoinproject.slack.com/archives/C018EJ8LWH1) in FIL Slack
- [ ] [#ipfs-chatter:ipfs.io](https://matrix.to/#/#ipfs-chatter:ipfs.io) in Matrix
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Add the link to the [IPFS Discourse](https://discuss.ipfs.tech) topic to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) description ([example](https://github.com/ipfs/kubo/releases/tag/v0.17.0))
- [ ] ![](https://img.shields.io/badge/only-RC-blue?style=flat-square) create an issue comment mentioning early testers on the release issue ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478))
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) create an issue comment linking to the release on the release issue ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975))
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) promote on bsky.app ([example](https://bsky.app/profile/ipshipyard.com/post/3lh2brzrwbs2c))
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) promote on x.com ([example](https://x.com/ipshipyard/status/1885346348808929609))
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) post the link to the [GitHub Release](https://github.com/ipfs/kubo/releases/tag/vX.Y.Z(-rcN)) to [Reddit](https://reddit.com/r/ipfs) ([example](https://www.reddit.com/r/ipfs/comments/9x0q0k/kubo_v0160_release_is_out/))
- [ ] Manually smoke-test the new version with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/)
- [ ] Update Kubo in [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
- [ ] create a PR which updates `kubo` version to the tagged version in `package.json` and `package-lock.json`
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) switch to final release and merge
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Update Kubo docs at docs.ipfs.tech:
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) run the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) merge the PR created by the [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow run
</details>
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Create a blog entry on [blog.ipfs.tech](https://blog.ipfs.tech)
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) create a PR which adds a release note for the new Kubo version ([example](https://github.com/ipfs/ipfs-blog/pull/529))
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) merge the PR
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) verify the blog entry was published
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) Create a dependency update PR
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) check out [ipfs/kubo](https://github.com/ipfs/kubo)
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) go over direct dependencies from `go.mod` in the root directory (NOTE: do not run `go get -u` as it will upgrade indirect dependencies which may cause problems)
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) run `make mod_tidy`
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) create a PR which updates `go.mod` and `go.sum`
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) add the PR to the next release milestone
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) ![](https://img.shields.io/badge/not-PATCH-orange?style=flat-square) Create the next release issue
- [ ] ![](https://img.shields.io/badge/only-FINAL-darkgreen?style=flat-square) Close the release issue
- [ ] Update Kubo staging environment ([Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8))
- [ ] **RC:** Test last release against current RC
- [ ] **FINAL:** Test last release against current one
- [ ] Update collab cluster boxes to the tagged release
- [ ] Update libp2p bootstrappers to the tagged release
- [ ] Smoke test with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/)
- [ ] Update [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
- [ ] Create PR updating kubo version in `package.json` and `package-lock.json`
- [ ] **FINAL only:** Merge and create/request new release
- [ ] **FINAL only:** Update [docs.ipfs.tech](https://docs.ipfs.tech/): run [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow and merge the PR
### Promotion
- [ ] Create [IPFS Discourse](https://discuss.ipfs.tech) topic ([RC example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [FINAL example](https://discuss.ipfs.tech/t/kubo-v0-37-0-is-out/19673))
- [ ] Title: `Kubo vX.Y.Z(-rcN) is out!`, tag: `kubo`
- [ ] Use title as heading (`##`) in description
- [ ] Include: GitHub release link, IPNS binaries, docker pull command, release notes
- [ ] Pin topic globally (make banner if no existing banner)
- [ ] Verify bot posted to [#ipfs-chatter](https://discord.com/channels/669268347736686612/669268347736686615) (Discord) or [#ipfs-chatter:ipfs.io](https://matrix.to/#/#ipfs-chatter:ipfs.io) (Matrix)
- [ ] **RC only:** Comment on release issue mentioning early testers ([example](https://github.com/ipfs/kubo/issues/9319#issuecomment-1311002478))
- [ ] **FINAL only:** Comment on release issue with link ([example](https://github.com/ipfs/kubo/issues/9417#issuecomment-1400740975))
- [ ] **FINAL only:** Create [blog.ipfs.tech](https://blog.ipfs.tech) entry ([example](https://github.com/ipfs/ipfs-blog/commit/32040d1e90279f21bad56b924fe4710bba5ba043))
- [ ] **FINAL non-PATCH:** (optional) Post on social media ([bsky](https://bsky.app/profile/ipshipyard.com/post/3ltxcsrbn5s2k), [x.com](https://x.com/ipshipyard/status/1944867893226635603), [Reddit](https://www.reddit.com/r/ipfs/comments/1lzy6ze/release_v0360_ipfskubo/))
### Final Steps
- [ ] **FINAL non-PATCH:** Create dependency update PR
- [ ] Review direct dependencies from root `go.mod` (⚠️ do **NOT** run `go get -u` as it will upgrade indirect dependencies which may cause problems)
- [ ] Run `make mod_tidy`
- [ ] Create PR with `go.mod` and `go.sum` updates
- [ ] Add PR to next release milestone
- [ ] **FINAL non-PATCH:** Create next release issue ([example](https://github.com/ipfs/kubo/issues/10816))
- [ ] **FINAL only:** Close release issue

---

This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [🚀 Repository migration: simplified provide configuration](#-repository-migration-simplified-provide-configuration)
- [🧹 Experimental Sweeping DHT Provider](#-experimental-sweeping-dht-provider)
- [📊 Exposed DHT metrics](#-exposed-dht-metrics)
- [🚨 Improved gateway error pages with diagnostic tools](#-improved-gateway-error-pages-with-diagnostic-tools)
- [🎨 Updated WebUI](#-updated-webui)
- [📌 Pin name improvements](#-pin-name-improvements)
- [🛠️ Identity CID size enforcement and `ipfs files write` fixes](#-identity-cid-size-enforcement-and-ipfs-files-write-fixes)
- [📤 Provide Filestore and Urlstore blocks on write](#-provide-filestore-and-urlstore-blocks-on-write)
  - [🚦 MFS operation limit for --flush=false](#-mfs-operation-limit-for---flushfalse)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
### Overview
Kubo 0.38.0 simplifies content announcement configuration, introduces an experimental sweeping DHT provider for efficient large-scale operations, and includes various performance improvements.
### 🔦 Highlights
#### 🚀 Repository migration: simplified provide configuration
This release migrates the repository from version 17 to version 18, simplifying how you configure content announcements.
The old `Provider` and `Reprovider` sections are now combined into a single [`Provide`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provide) section. Your existing settings are automatically migrated - no manual changes needed.
**Migration happens automatically** when you run `ipfs daemon --migrate`. For manual migration: `ipfs repo migrate --to=18`.
Read more about the new system below.
#### 🧹 Experimental Sweeping DHT Provider
A new experimental DHT provider is available as an alternative to both the default provider and the resource-intensive [accelerated DHT client](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingaccelerateddhtclient). Enable it via [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled).
**How it works:** Instead of providing keys one-by-one, the sweep provider systematically explores DHT keyspace regions in batches.
> <picture>
> <source media="(prefers-color-scheme: dark)" srcset="https://github.com/user-attachments/assets/f6e06b08-7fee-490c-a681-1bf440e16e27">
> <source media="(prefers-color-scheme: light)" srcset="https://github.com/user-attachments/assets/e1662d7c-f1be-4275-a9ed-f2752fcdcabe">
> <img alt="Reprovide Cycle Comparison" src="https://github.com/user-attachments/assets/e1662d7c-f1be-4275-a9ed-f2752fcdcabe">
> </picture>
>
> The diagram shows how sweep mode avoids the hourly traffic spikes of Accelerated DHT while maintaining similar effectiveness. By grouping CIDs into keyspace regions and processing them in batches, sweep mode reduces memory overhead and creates predictable network patterns.
**Benefits for large-scale operations:** Handles hundreds of thousands of CIDs with reduced memory and network connections, spreads operations evenly to eliminate resource spikes, maintains state across restarts through persistent keystore, and provides better metrics visibility.
**Monitoring and debugging:** Legacy mode (`SweepEnabled=false`) tracks `provider_reprovider_provide_count` and `provider_reprovider_reprovide_count`, while sweep mode (`SweepEnabled=true`) tracks `total_provide_count_total`. Enable debug logging with `GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug` to see detailed logs from either system.
> [!NOTE]
> This feature is experimental and opt-in. In the future, it will become the default and replace the legacy system. Some commands like `ipfs stats provide` and `ipfs routing provide` are not yet available with sweep mode. Run `ipfs provide --help` for alternatives.
For configuration details, see [`Provide.DHT`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedht). For metrics documentation, see [Provide metrics](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide).
#### 📊 Exposed DHT metrics
Kubo now exposes DHT metrics from [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/), including `total_provide_count_total` for sweep provider operations and RPC metrics prefixed with `rpc_inbound_` and `rpc_outbound_` for DHT message traffic. See [Kubo metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md) for details.
#### 🚨 Improved gateway error pages with diagnostic tools
Gateway error pages now provide more actionable information during content retrieval failures. When a 504 Gateway Timeout occurs, users see detailed retrieval state information including which phase failed and a sample of providers that were attempted:
> ![Improved gateway error page showing retrieval diagnostics](https://github.com/user-attachments/assets/18432c74-a5e0-4bbf-9815-7c780779dc98)
>
> - **[`Gateway.DiagnosticServiceURL`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaydiagnosticserviceurl)** (default: `https://check.ipfs.network`): Configures the diagnostic service URL. When set, 504 errors show a "Check CID retrievability" button that links to this service with `?cid=<failed-cid>` for external diagnostics. Set to empty string to disable.
> - **Enhanced error details**: Timeout errors now display the retrieval phase where failure occurred (e.g., "connecting to providers", "fetching data") and up to 3 peer IDs that were attempted but couldn't deliver the content, making it easier to diagnose network or provider issues.
> - **Retry button on all error pages**: Every gateway error page now includes a retry button for quick page refresh without manual URL re-entry.
#### 🎨 Updated WebUI
The Web UI has been updated to [v4.9](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.0) with a new **Diagnostics** screen for troubleshooting and system monitoring. Access it at `http://127.0.0.1:5001/webui` when running your local IPFS node.
| Diagnostics: Logs | Files: Check Retrieval | Diagnostics: Retrieval Results |
|:---:|:---:|:---:|
| ![Diagnostics logs](https://github.com/user-attachments/assets/a1560fd2-6f4e-4e4f-9506-85ecb10f96e5) | ![Retrieval check interface](https://github.com/user-attachments/assets/6efa8bf1-705e-4256-8c66-282455daf789) | ![Retrieval check results](https://github.com/user-attachments/assets/970f2de3-94a3-4d48-b0a4-46832f73c2e9) |
| Debug issues in real-time by adjusting [log level](https://github.com/ipfs/kubo/blob/master/docs/environment-variables.md#golog_log_level) without restart (global or per-subsystem like bitswap) | Check if content is available to other peers directly from Files screen | Find out why content won't load or who is providing it to the network |
| Peers: Agent Versions | Files: Custom Sorting |
|:---:|:---:|
| ![Peers with Agent Version](https://github.com/user-attachments/assets/4bf95e72-193a-415d-9428-dd222795107a) | ![File sorting options](https://github.com/user-attachments/assets/fd7a1807-c487-4393-ab60-a16ae087e6cd) |
| Know what software peers run | Find files faster with new sorting |
Additional improvements include a close button in the file viewer, better error handling, and fixed navigation highlighting.
#### 📌 Pin name improvements
`ipfs pin ls <cid> --names` now correctly returns pin names for specific CIDs ([#10649](https://github.com/ipfs/kubo/issues/10649), [boxo#1035](https://github.com/ipfs/boxo/pull/1035)), RPC no longer incorrectly returns names from other pins ([#10966](https://github.com/ipfs/kubo/pull/10966)), and pin names are now limited to 255 bytes for better cross-platform compatibility ([#10981](https://github.com/ipfs/kubo/pull/10981)).
#### 🛠️ Identity CID size enforcement and `ipfs files write` fixes
**Identity CID size limits are now enforced**
This release enforces a maximum of 128 bytes for identity CIDs ([IPIP-512](https://github.com/ipfs/specs/pull/512)) - attempting to exceed this limit will return a clear error message.
Identity CIDs use [multihash `0x00`](https://github.com/multiformats/multicodec/blob/master/table.csv#L2) to embed data directly in the CID without hashing. This experimental optimization was designed for tiny data where a CID reference would be larger than the data itself, but without size limits it was easy to misuse and could turn into an anti-pattern that wastes resources and enables abuse.
- `ipfs add --inline-limit` and `--hash=identity` now enforce the 128-byte maximum (error when exceeded)
- `ipfs files write` prevents creation of oversized identity CIDs
**Multiple `ipfs files write` bugs have been fixed**
This release resolves several long-standing MFS issues: raw nodes now preserve their codec instead of being forced to dag-pb, append operations on raw nodes work correctly by converting to UnixFS when needed, and identity CIDs properly inherit the full CID prefix from parent directories.
#### 📤 Provide Filestore and Urlstore blocks on write
Improvements to the providing system in the last release (provide blocks according to the configured [Strategy](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy)) left out [Filestore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore) and [Urlstore](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-urlstore) blocks when the "all" strategy was used. They would only be reprovided but not provided on write. This is now fixed, and both Filestore blocks (local file references) and Urlstore blocks (HTTP/HTTPS URL references) will be provided correctly shortly after initial add.
#### 🚦 MFS operation limit for --flush=false
The new [`Internal.MFSNoFlushLimit`](https://github.com/ipfs/kubo/blob/master/docs/config.md#internalmfsnoflushlimit) configuration option prevents unbounded memory growth when using `--flush=false` with `ipfs files` commands. After performing the configured number of operations without flushing (default: 256), further operations will fail with a clear error message instructing users to flush manually.
#### 📦️ Important dependency updates
- update `boxo` to [v0.35.0](https://github.com/ipfs/boxo/releases/tag/v0.35.0)
- update `go-libp2p-kad-dht` to [v0.35.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.35.0)
- update `ipfs-webui` to [v4.9.1](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.1) (incl. [v4.9.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.9.0))
### 📝 Changelog
<details><summary>Full Changelog</summary>
- github.com/ipfs/kubo:
- chore: v0.38.0
- chore: bump go-libp2p-kad-dht to v0.35.0 (#11002) ([ipfs/kubo#11002](https://github.com/ipfs/kubo/pull/11002))
- docs: add sweeping provide worker count recommendation (#11001) ([ipfs/kubo#11001](https://github.com/ipfs/kubo/pull/11001))
- Upgrade to Boxo v0.35.0 (#10999) ([ipfs/kubo#10999](https://github.com/ipfs/kubo/pull/10999))
- chore: 0.38.0-rc2
- chore: update boxo and kad-dht dependencies (#10995) ([ipfs/kubo#10995](https://github.com/ipfs/kubo/pull/10995))
- fix: update webui to v4.9.1 (#10994) ([ipfs/kubo#10994](https://github.com/ipfs/kubo/pull/10994))
- fix: provider merge conflicts (#10989) ([ipfs/kubo#10989](https://github.com/ipfs/kubo/pull/10989))
- fix(mfs): add soft limit for `--flush=false` (#10985) ([ipfs/kubo#10985](https://github.com/ipfs/kubo/pull/10985))
- fix: provide Filestore nodes (#10990) ([ipfs/kubo#10990](https://github.com/ipfs/kubo/pull/10990))
- feat: limit pin names to 255 bytes (#10981) ([ipfs/kubo#10981](https://github.com/ipfs/kubo/pull/10981))
- fix: SweepingProvider slow start (#10980) ([ipfs/kubo#10980](https://github.com/ipfs/kubo/pull/10980))
- chore: release v0.38.0-rc1
- fix: SweepingProvider shouldn't error when missing DHT (#10975) ([ipfs/kubo#10975](https://github.com/ipfs/kubo/pull/10975))
- fix: allow custom http provide when libp2p node is offline (#10974) ([ipfs/kubo#10974](https://github.com/ipfs/kubo/pull/10974))
- docs(provide): validation and reprovide cycle visualization (#10977) ([ipfs/kubo#10977](https://github.com/ipfs/kubo/pull/10977))
- refactor(ci): optimize build workflows (#10973) ([ipfs/kubo#10973](https://github.com/ipfs/kubo/pull/10973))
- fix(cmds): cleanup unicode identify strings (#9465) ([ipfs/kubo#9465](https://github.com/ipfs/kubo/pull/9465))
- feat: ipfs-webui v4.9.0 with retrieval diagnostics (#10969) ([ipfs/kubo#10969](https://github.com/ipfs/kubo/pull/10969))
- fix(mfs): unbound cache growth with `flush=false` (#10971) ([ipfs/kubo#10971](https://github.com/ipfs/kubo/pull/10971))
- fix: `ipfs pin ls <cid> --names` (#10970) ([ipfs/kubo#10970](https://github.com/ipfs/kubo/pull/10970))
- refactor(config): migration 17-to-18 to unify Provider/Reprovider into Provide.DHT (#10951) ([ipfs/kubo#10951](https://github.com/ipfs/kubo/pull/10951))
- feat: opt-in new Sweep provide system (#10834) ([ipfs/kubo#10834](https://github.com/ipfs/kubo/pull/10834))
- rpc: retrieve pin names when Detailed option provided (#10966) ([ipfs/kubo#10966](https://github.com/ipfs/kubo/pull/10966))
- fix: enforce identity CID size limits (#10949) ([ipfs/kubo#10949](https://github.com/ipfs/kubo/pull/10949))
- docs: kubo logo sources (#10964) ([ipfs/kubo#10964](https://github.com/ipfs/kubo/pull/10964))
- feat(config): validate Import config at daemon startup (#10957) ([ipfs/kubo#10957](https://github.com/ipfs/kubo/pull/10957))
- fix(telemetry): improve vm/container detection (#10944) ([ipfs/kubo#10944](https://github.com/ipfs/kubo/pull/10944))
- feat(gateway): improved error page with retrieval state details (#10950) ([ipfs/kubo#10950](https://github.com/ipfs/kubo/pull/10950))
- close files opened during migration (#10956) ([ipfs/kubo#10956](https://github.com/ipfs/kubo/pull/10956))
- fix ctrl-c prompt during run migrations prompt (#10947) ([ipfs/kubo#10947](https://github.com/ipfs/kubo/pull/10947))
- repo: use config api to get node root path (#10934) ([ipfs/kubo#10934](https://github.com/ipfs/kubo/pull/10934))
- docs: simplify release process (#10870) ([ipfs/kubo#10870](https://github.com/ipfs/kubo/pull/10870))
- Merge release v0.37.0 ([ipfs/kubo#10943](https://github.com/ipfs/kubo/pull/10943))
- feat(ci): docker linting (#10927) ([ipfs/kubo#10927](https://github.com/ipfs/kubo/pull/10927))
- fix: disable telemetry in test profile (#10931) ([ipfs/kubo#10931](https://github.com/ipfs/kubo/pull/10931))
- fix: harness tests random panic (#10933) ([ipfs/kubo#10933](https://github.com/ipfs/kubo/pull/10933))
- chore: 0.38.0-dev
- github.com/ipfs/boxo (v0.34.0 -> v0.35.0):
- Release v0.35.0 ([ipfs/boxo#1046](https://github.com/ipfs/boxo/pull/1046))
- feat(gateway): add `MaxRangeRequestFileSize` protection (#1043) ([ipfs/boxo#1043](https://github.com/ipfs/boxo/pull/1043))
- revert: remove MFS auto-flush mechanism (#1041) ([ipfs/boxo#1041](https://github.com/ipfs/boxo/pull/1041))
- Filestore: add Provider option to provide filestore blocks. (#1042) ([ipfs/boxo#1042](https://github.com/ipfs/boxo/pull/1042))
- fix(pinner): restore indirect pin detection and add context cancellation (#1039) ([ipfs/boxo#1039](https://github.com/ipfs/boxo/pull/1039))
- fix(mfs): limit cache growth by default (#1037) ([ipfs/boxo#1037](https://github.com/ipfs/boxo/pull/1037))
- update dependencies (#1038) ([ipfs/boxo#1038](https://github.com/ipfs/boxo/pull/1038))
- feat(pinner): add `CheckIfPinnedWithType` for efficient checks with names (#1035) ([ipfs/boxo#1035](https://github.com/ipfs/boxo/pull/1035))
- fix(routing/http): don't cancel batch prematurely (#1036) ([ipfs/boxo#1036](https://github.com/ipfs/boxo/pull/1036))
- refactor: use the new Reprovide Sweep interface (#995) ([ipfs/boxo#995](https://github.com/ipfs/boxo/pull/995))
- Update go-dsqueue to latest (#1034) ([ipfs/boxo#1034](https://github.com/ipfs/boxo/pull/1034))
- feat(routing/http): return 200 for empty results per IPIP-513 (#1032) ([ipfs/boxo#1032](https://github.com/ipfs/boxo/pull/1032))
- replace provider queue with go-dsqueue (#1033) ([ipfs/boxo#1033](https://github.com/ipfs/boxo/pull/1033))
- refactor: use slices package to simplify slice manipulation (#1031) ([ipfs/boxo#1031](https://github.com/ipfs/boxo/pull/1031))
- bitswap/network: fix read/write data race in bitswap network test (#1030) ([ipfs/boxo#1030](https://github.com/ipfs/boxo/pull/1030))
- fix(verifcid): enforce size limit for identity CIDs (#1018) ([ipfs/boxo#1018](https://github.com/ipfs/boxo/pull/1018))
- docs: boxo logo source files (#1028) ([ipfs/boxo#1028](https://github.com/ipfs/boxo/pull/1028))
- feat(gateway): enhance 504 timeout errors with diagnostic UX (#1023) ([ipfs/boxo#1023](https://github.com/ipfs/boxo/pull/1023))
- Use `time.Duration` for rebroadcast delay (#1027) ([ipfs/boxo#1027](https://github.com/ipfs/boxo/pull/1027))
- refactor(bitswap/client/internal): close session with Close method instead of context (#1011) ([ipfs/boxo#1011](https://github.com/ipfs/boxo/pull/1011))
- fix: use %q for logging routing keys with binary data (#1025) ([ipfs/boxo#1025](https://github.com/ipfs/boxo/pull/1025))
- rename `retrieval.RetrievalState` to `retrieval.State` (#1026) ([ipfs/boxo#1026](https://github.com/ipfs/boxo/pull/1026))
- feat(gateway): add retrieval state tracking for timeout diagnostics (#1015) ([ipfs/boxo#1015](https://github.com/ipfs/boxo/pull/1015))
- Nonfunctional changes (#1017) ([ipfs/boxo#1017](https://github.com/ipfs/boxo/pull/1017))
- fix: flaky TestCancelOverridesPendingWants (#1016) ([ipfs/boxo#1016](https://github.com/ipfs/boxo/pull/1016))
- bitswap/client: GetBlocks cancels session when finished (#1007) ([ipfs/boxo#1007](https://github.com/ipfs/boxo/pull/1007))
- Remove unused context ([ipfs/boxo#1006](https://github.com/ipfs/boxo/pull/1006))
- github.com/ipfs/go-block-format (v0.2.2 -> v0.2.3):
- new version (#66) ([ipfs/go-block-format#66](https://github.com/ipfs/go-block-format/pull/66))
- Replace CI badge and add GoDoc link in README (#65) ([ipfs/go-block-format#65](https://github.com/ipfs/go-block-format/pull/65))
- github.com/ipfs/go-datastore (v0.8.3 -> v0.9.0):
- new version (#255) ([ipfs/go-datastore#255](https://github.com/ipfs/go-datastore/pull/255))
- feat(keytransform): support transaction feature (#239) ([ipfs/go-datastore#239](https://github.com/ipfs/go-datastore/pull/239))
- feat: context datastore (#238) ([ipfs/go-datastore#238](https://github.com/ipfs/go-datastore/pull/238))
- new version (#254) ([ipfs/go-datastore#254](https://github.com/ipfs/go-datastore/pull/254))
- fix comment (#253) ([ipfs/go-datastore#253](https://github.com/ipfs/go-datastore/pull/253))
- feat: query iterator (#244) ([ipfs/go-datastore#244](https://github.com/ipfs/go-datastore/pull/244))
- Update readme links (#246) ([ipfs/go-datastore#246](https://github.com/ipfs/go-datastore/pull/246))
- github.com/ipfs/go-ipld-format (v0.6.2 -> v0.6.3):
- new version (#100) ([ipfs/go-ipld-format#100](https://github.com/ipfs/go-ipld-format/pull/100))
- avoid unnecessary slice allocation (#99) ([ipfs/go-ipld-format#99](https://github.com/ipfs/go-ipld-format/pull/99))
- github.com/ipfs/go-unixfsnode (v1.10.1 -> v1.10.2):
- new version ([ipfs/go-unixfsnode#88](https://github.com/ipfs/go-unixfsnode/pull/88))
- github.com/ipld/go-car/v2 (v2.14.3 -> v2.15.0):
- v2.15.0 bump (#606) ([ipld/go-car#606](https://github.com/ipld/go-car/pull/606))
- feat: add NextReader to BlockReader (#603) ([ipld/go-car#603](https://github.com/ipld/go-car/pull/603))
- Remove `@masih` form CODEOWNERS ([ipld/go-car#605](https://github.com/ipld/go-car/pull/605))
- github.com/libp2p/go-libp2p-kad-dht (v0.34.0 -> v0.35.0):
- chore: release v0.35.0 (#1162) ([libp2p/go-libp2p-kad-dht#1162](https://github.com/libp2p/go-libp2p-kad-dht/pull/1162))
- refactor: adjust FIND_NODE response exceptions (#1158) ([libp2p/go-libp2p-kad-dht#1158](https://github.com/libp2p/go-libp2p-kad-dht/pull/1158))
- refactor: remove provider status command (#1157) ([libp2p/go-libp2p-kad-dht#1157](https://github.com/libp2p/go-libp2p-kad-dht/pull/1157))
- refactor(provider): closestPeerToPrefix coverage trie (#1156) ([libp2p/go-libp2p-kad-dht#1156](https://github.com/libp2p/go-libp2p-kad-dht/pull/1156))
- fix: don't empty mapdatastore keystore on close (#1155) ([libp2p/go-libp2p-kad-dht#1155](https://github.com/libp2p/go-libp2p-kad-dht/pull/1155))
- provider: default options (#1153) ([libp2p/go-libp2p-kad-dht#1153](https://github.com/libp2p/go-libp2p-kad-dht/pull/1153))
- fix(keystore): use new batch after commit (#1154) ([libp2p/go-libp2p-kad-dht#1154](https://github.com/libp2p/go-libp2p-kad-dht/pull/1154))
- provider: more minor fixes (#1152) ([libp2p/go-libp2p-kad-dht#1152](https://github.com/libp2p/go-libp2p-kad-dht/pull/1152))
- rename KeyStore -> Keystore (#1151) ([libp2p/go-libp2p-kad-dht#1151](https://github.com/libp2p/go-libp2p-kad-dht/pull/1151))
- provider: minor fixes (#1150) ([libp2p/go-libp2p-kad-dht#1150](https://github.com/libp2p/go-libp2p-kad-dht/pull/1150))
- buffered provider (#1149) ([libp2p/go-libp2p-kad-dht#1149](https://github.com/libp2p/go-libp2p-kad-dht/pull/1149))
- keystore: remove mutex (#1147) ([libp2p/go-libp2p-kad-dht#1147](https://github.com/libp2p/go-libp2p-kad-dht/pull/1147))
- provider: ResettableKeyStore (#1146) ([libp2p/go-libp2p-kad-dht#1146](https://github.com/libp2p/go-libp2p-kad-dht/pull/1146))
- keystore: revamp (#1142) ([libp2p/go-libp2p-kad-dht#1142](https://github.com/libp2p/go-libp2p-kad-dht/pull/1142))
- provider: use synctest for testing time (#1136) ([libp2p/go-libp2p-kad-dht#1136](https://github.com/libp2p/go-libp2p-kad-dht/pull/1136))
- provider: connectivity state machine (#1135) ([libp2p/go-libp2p-kad-dht#1135](https://github.com/libp2p/go-libp2p-kad-dht/pull/1135))
- provider: minor fixes (#1133) ([libp2p/go-libp2p-kad-dht#1133](https://github.com/libp2p/go-libp2p-kad-dht/pull/1133))
- dual: provider (#1132) ([libp2p/go-libp2p-kad-dht#1132](https://github.com/libp2p/go-libp2p-kad-dht/pull/1132))
- provider: refresh schedule (#1131) ([libp2p/go-libp2p-kad-dht#1131](https://github.com/libp2p/go-libp2p-kad-dht/pull/1131))
- provider: integration tests (#1127) ([libp2p/go-libp2p-kad-dht#1127](https://github.com/libp2p/go-libp2p-kad-dht/pull/1127))
- provider: daemon (#1126) ([libp2p/go-libp2p-kad-dht#1126](https://github.com/libp2p/go-libp2p-kad-dht/pull/1126))
- provide: handle reprovide (#1125) ([libp2p/go-libp2p-kad-dht#1125](https://github.com/libp2p/go-libp2p-kad-dht/pull/1125))
- provider: options (#1124) ([libp2p/go-libp2p-kad-dht#1124](https://github.com/libp2p/go-libp2p-kad-dht/pull/1124))
- provider: catchup pending work (#1123) ([libp2p/go-libp2p-kad-dht#1123](https://github.com/libp2p/go-libp2p-kad-dht/pull/1123))
- provider: batch reprovide (#1122) ([libp2p/go-libp2p-kad-dht#1122](https://github.com/libp2p/go-libp2p-kad-dht/pull/1122))
- provider: batch provide (#1121) ([libp2p/go-libp2p-kad-dht#1121](https://github.com/libp2p/go-libp2p-kad-dht/pull/1121))
- provider: swarm exploration (#1120) ([libp2p/go-libp2p-kad-dht#1120](https://github.com/libp2p/go-libp2p-kad-dht/pull/1120))
- provider: handleProvide (#1118) ([libp2p/go-libp2p-kad-dht#1118](https://github.com/libp2p/go-libp2p-kad-dht/pull/1118))
- provider: schedule (#1117) ([libp2p/go-libp2p-kad-dht#1117](https://github.com/libp2p/go-libp2p-kad-dht/pull/1117))
- provider: schedule prefix length (#1116) ([libp2p/go-libp2p-kad-dht#1116](https://github.com/libp2p/go-libp2p-kad-dht/pull/1116))
- provider: ProvideStatus interface (#1110) ([libp2p/go-libp2p-kad-dht#1110](https://github.com/libp2p/go-libp2p-kad-dht/pull/1110))
- provider: network operations (#1115) ([libp2p/go-libp2p-kad-dht#1115](https://github.com/libp2p/go-libp2p-kad-dht/pull/1115))
- provider: adding provide and reprovide queue (#1114) ([libp2p/go-libp2p-kad-dht#1114](https://github.com/libp2p/go-libp2p-kad-dht/pull/1114))
- provider: trie allocation helper (#1108) ([libp2p/go-libp2p-kad-dht#1108](https://github.com/libp2p/go-libp2p-kad-dht/pull/1108))
- add missing ShortestCoveredPrefix ([libp2p/go-libp2p-kad-dht@d0b110d](https://github.com/libp2p/go-libp2p-kad-dht/commit/d0b110d))
- provider: keyspace helpers ([libp2p/go-libp2p-kad-dht@af3ce09](https://github.com/libp2p/go-libp2p-kad-dht/commit/af3ce09))
- provider: helpers package rename (#1111) ([libp2p/go-libp2p-kad-dht#1111](https://github.com/libp2p/go-libp2p-kad-dht/pull/1111))
- provider: trie region helpers (#1109) ([libp2p/go-libp2p-kad-dht#1109](https://github.com/libp2p/go-libp2p-kad-dht/pull/1109))
- provider: PruneSubtrie helper (#1107) ([libp2p/go-libp2p-kad-dht#1107](https://github.com/libp2p/go-libp2p-kad-dht/pull/1107))
- provider: NextNonEmptyLeaf trie helper (#1106) ([libp2p/go-libp2p-kad-dht#1106](https://github.com/libp2p/go-libp2p-kad-dht/pull/1106))
- provider: find subtrie helper (#1105) ([libp2p/go-libp2p-kad-dht#1105](https://github.com/libp2p/go-libp2p-kad-dht/pull/1105))
- provider: helpers trie find prefix (#1104) ([libp2p/go-libp2p-kad-dht#1104](https://github.com/libp2p/go-libp2p-kad-dht/pull/1104))
- provider: trie items listing helpers (#1103) ([libp2p/go-libp2p-kad-dht#1103](https://github.com/libp2p/go-libp2p-kad-dht/pull/1103))
- provider: add ShortestCoveredPrefix helper (#1102) ([libp2p/go-libp2p-kad-dht#1102](https://github.com/libp2p/go-libp2p-kad-dht/pull/1102))
- provider: key helpers (#1101) ([libp2p/go-libp2p-kad-dht#1101](https://github.com/libp2p/go-libp2p-kad-dht/pull/1101))
- provider: Connectivity Checker (#1099) ([libp2p/go-libp2p-kad-dht#1099](https://github.com/libp2p/go-libp2p-kad-dht/pull/1099))
- provider: SweepingProvider interface (#1098) ([libp2p/go-libp2p-kad-dht#1098](https://github.com/libp2p/go-libp2p-kad-dht/pull/1098))
- provider: keystore (#1096) ([libp2p/go-libp2p-kad-dht#1096](https://github.com/libp2p/go-libp2p-kad-dht/pull/1096))
- provider initial commit ([libp2p/go-libp2p-kad-dht@70d21a8](https://github.com/libp2p/go-libp2p-kad-dht/commit/70d21a8))
- test GCP result order (#1097) ([libp2p/go-libp2p-kad-dht#1097](https://github.com/libp2p/go-libp2p-kad-dht/pull/1097))
- refactor: apply suggestions in records (#1113) ([libp2p/go-libp2p-kad-dht#1113](https://github.com/libp2p/go-libp2p-kad-dht/pull/1113))
- github.com/libp2p/go-libp2p-kbucket (v0.7.0 -> v0.8.0):
- chore: release v0.8.0 (#147) ([libp2p/go-libp2p-kbucket#147](https://github.com/libp2p/go-libp2p-kbucket/pull/147))
- feat: generic find PeerID with CPL (#145) ([libp2p/go-libp2p-kbucket#145](https://github.com/libp2p/go-libp2p-kbucket/pull/145))
- github.com/multiformats/go-varint (v0.0.7 -> v0.1.0):
- v0.1.0 bump (#29) ([multiformats/go-varint#29](https://github.com/multiformats/go-varint/pull/29))
- chore: optimise UvarintSize (#28) ([multiformats/go-varint#28](https://github.com/multiformats/go-varint/pull/28))
</details>
### 👨‍👩‍👧‍👦 Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Guillaume Michel | 62 | +15401/-5657 | 209 |
| Marcin Rataj | 33 | +9540/-1734 | 215 |
| Andrew Gillis | 29 | +771/-1093 | 70 |
| Hlib Kanunnikov | 2 | +350/-0 | 5 |
| Rod Vagg | 3 | +260/-9 | 4 |
| Hector Sanjuan | 4 | +188/-33 | 11 |
| Jakub Sztandera | 1 | +67/-15 | 3 |
| Masih H. Derkani | 1 | +1/-2 | 2 |
| Dominic Della Valle | 1 | +2/-1 | 1 |

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,7 @@ go 1.25
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/boxo v0.34.0
github.com/ipfs/boxo v0.35.0
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.43.0
github.com/multiformats/go-multiaddr v0.16.1
@ -26,7 +26,7 @@ require (
github.com/caddyserver/certmagic v0.23.0 // indirect
github.com/caddyserver/zerossl v0.1.3 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/ceramicnetwork/go-dag-jose v0.1.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect
@ -50,21 +50,22 @@ require (
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.2.4 // indirect
github.com/golang/glog v1.2.5 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/guillaumemichel/reservedpool v0.3.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
@ -72,29 +73,31 @@ require (
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-block-format v0.2.2 // indirect
github.com/ipfs/go-block-format v0.2.3 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-cidutil v0.1.0 // indirect
github.com/ipfs/go-datastore v0.8.3 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-ds-badger v0.3.4 // indirect
github.com/ipfs/go-ds-flatfs v0.5.5 // indirect
github.com/ipfs/go-ds-leveldb v0.5.2 // indirect
github.com/ipfs/go-ds-measure v0.2.2 // indirect
github.com/ipfs/go-ds-pebble v0.5.1 // indirect
github.com/ipfs/go-dsqueue v0.0.5 // indirect
github.com/ipfs/go-fs-lock v0.1.1 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
github.com/ipfs/go-ipld-format v0.6.2 // indirect
github.com/ipfs/go-ipld-format v0.6.3 // indirect
github.com/ipfs/go-ipld-git v0.1.1 // indirect
github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
github.com/ipfs/go-log/v2 v2.8.1 // indirect
github.com/ipfs/go-metrics-interface v0.3.0 // indirect
github.com/ipfs/go-peertaskqueue v0.8.2 // indirect
github.com/ipfs/go-unixfsnode v1.10.1 // indirect
github.com/ipld/go-car/v2 v2.14.3 // indirect
github.com/ipfs/go-test v0.2.3 // indirect
github.com/ipfs/go-unixfsnode v1.10.2 // indirect
github.com/ipld/go-car/v2 v2.15.0 // indirect
github.com/ipld/go-codec-dagpb v1.7.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipshipyard/p2p-forge v0.6.1 // indirect
@ -111,8 +114,8 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.34.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
@ -139,7 +142,7 @@ require (
github.com/multiformats/go-multicodec v0.9.2 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/multiformats/go-varint v0.1.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
@ -166,14 +169,15 @@ require (
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/prometheus/client_golang v1.23.0 // indirect
github.com/probe-lab/go-libdht v0.2.1 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.65.0 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect
@ -186,40 +190,42 @@ require (
github.com/wlynxg/anet v0.0.5 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
go.opentelemetry.io/otel v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/zipkin v1.37.0 // indirect
go.opentelemetry.io/otel/metric v1.37.0 // indirect
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/crypto v0.41.0 // indirect
golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect
golang.org/x/mod v0.27.0 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/crypto v0.42.0 // indirect
golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect
golang.org/x/mod v0.28.0 // indirect
golang.org/x/net v0.44.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.36.0 // indirect
golang.org/x/tools v0.37.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gonum.org/v1/gonum v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0 // indirect
google.golang.org/protobuf v1.36.7 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/grpc v1.75.0 // indirect
google.golang.org/protobuf v1.36.9 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

View File

@ -67,8 +67,8 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/ceramicnetwork/go-dag-jose v0.1.1 h1:7pObs22egc14vSS3AfCFfS1VmaL4lQUsAK7OGC3PlKk=
github.com/ceramicnetwork/go-dag-jose v0.1.1/go.mod h1:8ptnYwY2Z2y/s5oJnNBn/UCxLg6CpramNJ2ZXF/5aNY=
@ -159,8 +159,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
@ -177,8 +177,8 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -193,8 +193,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -264,8 +264,10 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw=
github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -287,13 +289,13 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.34.0 h1:pMP9bAsTs4xVh8R0ZmxIWviV7kjDa60U24QrlGgHb1g=
github.com/ipfs/boxo v0.34.0/go.mod h1:kzdH/ewDybtO3+M8MCVkpwnIIc/d2VISX95DFrY4vQA=
github.com/ipfs/boxo v0.35.0 h1:3Mku5arSbAZz0dvb4goXRsQuZkFkPrGr5yYdu0YM1pY=
github.com/ipfs/boxo v0.35.0/go.mod h1:uhaF0DGnbgEiXDTmD249jCGbxVkMm6+Ew85q6Uub7lo=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
github.com/ipfs/go-block-format v0.2.2 h1:uecCTgRwDIXyZPgYspaLXoMiMmxQpSx2aq34eNc4YvQ=
github.com/ipfs/go-block-format v0.2.2/go.mod h1:vmuefuWU6b+9kIU0vZJgpiJt1yicQz9baHXE8qR+KB8=
github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
@ -303,8 +305,8 @@ github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
github.com/ipfs/go-datastore v0.8.3 h1:z391GsQyGKUIUof2tPoaZVeDknbt7fNHs6Gqjcw5Jo4=
github.com/ipfs/go-datastore v0.8.3/go.mod h1:raxQ/CreIy9L6MxT71ItfMX12/ASN6EhXJoUFjICQ2M=
github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
@ -319,10 +321,14 @@ github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICk
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.1 h1:p0FAE0zw9J/3T1VkGB9s98jWmfKmw2t0iEwfMUv8iSQ=
github.com/ipfs/go-ds-pebble v0.5.1/go.mod h1:LsmQx4w+0o9znl4hTxYo1Y2lnBTzNCwc4kNpD3wWXM0=
github.com/ipfs/go-dsqueue v0.0.5 h1:TUOk15TlCJ/NKV8Yk2W5wgkEjDa44Nem7a7FGIjsMNU=
github.com/ipfs/go-dsqueue v0.0.5/go.mod h1:i/jAlpZjBbQJLioN+XKbFgnd+u9eAhGZs9IrqIzTd9g=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ=
github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
@ -336,8 +342,8 @@ github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyB
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E=
github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A=
github.com/ipfs/go-ipld-format v0.6.2 h1:bPZQ+A05ol0b3lsJSl0bLvwbuQ+HQbSsdGTy4xtYUkU=
github.com/ipfs/go-ipld-format v0.6.2/go.mod h1:nni2xFdHKx5lxvXJ6brt/pndtGxKAE+FPR1rg4jTkyk=
github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8=
github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk=
github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y=
github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI=
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
@ -353,10 +359,10 @@ github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8
github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
github.com/ipfs/go-unixfsnode v1.10.1 h1:hGKhzuH6NSzZ4y621wGuDspkjXRNG3B+HqhlyTjSwSM=
github.com/ipfs/go-unixfsnode v1.10.1/go.mod h1:eguv/otvacjmfSbYvmamc9ssNAzLvRk0+YN30EYeOOY=
github.com/ipld/go-car/v2 v2.14.3 h1:1Mhl82/ny8MVP+w1M4LXbj4j99oK3gnuZG2GmG1IhC8=
github.com/ipld/go-car/v2 v2.14.3/go.mod h1:/vpSvPngOX8UnvmdFJ3o/mDgXa9LuyXsn7wxOzHDYQE=
github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8=
github.com/ipfs/go-unixfsnode v1.10.2/go.mod h1:ImDPTSiKZ+2h4UVdkSDITJHk87bUAp7kX/lgifjRicg=
github.com/ipld/go-car/v2 v2.15.0 h1:RxtZcGXFx72zFESl+UUsCNQV2YMcy3gEMYx9M3uio24=
github.com/ipld/go-car/v2 v2.15.0/go.mod h1:ovlq/n3xlVJDmoiN3Kd/Z7kIzQbdTIFSwltfOP+qIgk=
github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0=
github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM=
github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
@ -426,11 +432,11 @@ github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl9
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0=
github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o=
github.com/libp2p/go-libp2p-kad-dht v0.35.0 h1:pWRC4FKR9ptQjA9DuMSrAn2D3vABE8r58iAeoLoK1Ig=
github.com/libp2p/go-libp2p-kad-dht v0.35.0/go.mod h1:s70f017NjhsBx+SVl0/w+x//uyglrFpKLfvuQJj4QAU=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs=
github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
@ -534,8 +540,8 @@ github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuV
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI=
github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
@ -622,16 +628,18 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/probe-lab/go-libdht v0.2.1 h1:oBCsKBvS/OVirTO5+BT6/AOocWjdqwpfSfkTfBjUPJE=
github.com/probe-lab/go-libdht v0.2.1/go.mod h1:q+WlGiqs/UIRfdhw9Gmc+fPoAYlOim7VvXTjOI6KJmQ=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
@ -643,8 +651,8 @@ github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssk
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
@ -712,8 +720,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
@ -767,32 +775,32 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4=
go.opentelemetry.io/otel/exporters/zipkin v1.37.0 h1:Z2apuaRnHEjzDAkpbWNPiksz1R0/FCIrJSjiMA43zwI=
go.opentelemetry.io/otel/exporters/zipkin v1.37.0/go.mod h1:ofGu/7fG+bpmjZoiPUUmYDJ4vXWxMT57HmGoegx49uw=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
@ -809,6 +817,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
@ -832,8 +842,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -842,8 +852,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE=
golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -866,8 +876,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -903,8 +913,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -923,8 +933,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -971,8 +981,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8=
golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -992,8 +1004,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1034,8 +1046,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1081,10 +1093,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@ -1097,8 +1109,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1110,8 +1122,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -539,7 +539,7 @@ ipfs config --json Swarm.RelayClient.Enabled true
`Experimental.StrategicProviding` was removed in Kubo v0.35.
Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providerenabled) and [`Reprovider.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#reproviderstrategy).
Replaced by [`Provide.Enabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#provideenabled) and [`Provide.Strategy`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy).
## GraphSync

BIN
docs/logo/kubo-logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

34
docs/logo/kubo-logo.svg Normal file
View File

@ -0,0 +1,34 @@
<svg width="382" height="440" viewBox="0 0 382 440" fill="none" xmlns="http://www.w3.org/2000/svg">
<mask id="mask0_34_687" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="0" y="0" width="382" height="440">
<path d="M190.526 0L381.051 110V330L190.526 440L0 330V110L190.526 0Z" fill="#D9D9D9"/>
</mask>
<g mask="url(#mask0_34_687)">
<path d="M190.526 0L381.051 110V330L190.526 440L0 330V110L190.526 0Z" fill="#194649"/>
<path d="M190.526 0L381.051 110V330L190.526 440L0 330V110L190.526 0Z" fill="#194649"/>
<path d="M71 99L75.3215 110.679L87 115L75.3215 119.321L71 131L66.6785 119.321L55 115L66.6785 110.679L71 99Z" fill="white"/>
<path d="M110 83L113.511 92.4888L123 96L113.511 99.5112L110 109L106.489 99.5112L97 96L106.489 92.4888L110 83Z" fill="white"/>
<path d="M343 169L347.321 180.679L359 185L347.321 189.321L343 201L338.679 189.321L327 185L338.679 180.679L343 169Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M381.051 331.294V335L190.526 445L0 335V325.09C14.8033 243.957 94.2454 182 190.026 182C288.233 182 369.263 247.136 381.051 331.294Z" fill="#F2F2F2"/>
<path d="M351.82 342.01C357.225 356.34 346.548 373.637 327.971 380.644C309.395 387.651 289.954 381.714 284.549 367.383C279.143 353.053 289.821 335.756 308.397 328.749C326.974 321.742 346.415 327.679 351.82 342.01Z" fill="#DBDBDB"/>
<path d="M307.394 418.64L193.525 352.82L307.394 287L421.92 352.82L307.394 418.64Z" fill="#194649" fill-opacity="0.4"/>
<path d="M229.646 407.625C234.039 418.858 219.915 434.88 198.099 443.411C176.284 451.943 155.038 449.752 150.645 438.52C146.252 427.287 160.376 411.265 182.192 402.734C204.007 394.202 225.253 396.392 229.646 407.625Z" fill="#DBDBDB"/>
<path d="M62.0046 320.754C67.9944 336.635 61.7391 353.699 48.0331 358.869C34.3271 364.038 18.3606 355.355 12.3708 339.475C6.38105 323.594 12.6363 306.53 26.3423 301.36C40.0483 296.191 56.0148 304.874 62.0046 320.754Z" fill="#DBDBDB"/>
<path d="M193.409 220.962L79.541 155.143V220.962L193.409 287.441L307.936 220.962V155.143L193.409 220.962Z" fill="#88CDD2"/>
<path d="M193.409 220.962L79.541 155.142L193.409 89.3223L307.936 155.142L193.409 220.962Z" fill="#9BDBDF"/>
<path d="M145.705 147.284C143.791 146.18 143.791 144.389 145.704 143.284L172.971 127.529C174.884 126.424 177.986 126.423 179.9 127.527L241.385 162.999C243.298 164.103 243.299 165.894 241.386 166.999L214.118 182.754C212.206 183.859 209.104 183.86 207.19 182.756L145.705 147.284Z" fill="#62B3BA"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M79.541 220.961L193.409 286.781L307.936 220.962V286.781L193.409 353.259L130.83 316.725C130.831 316.623 130.832 316.52 130.832 316.417C130.832 301.876 120.626 284.192 108.035 276.918C95.4453 269.643 85.239 275.534 85.239 290.074C85.239 290.086 85.239 290.097 85.239 290.108L79.541 286.781V220.961Z" fill="#62B3BA"/>
<path d="M90.9385 178.892C90.9385 176.682 92.4891 175.788 94.4019 176.893L121.67 192.647C123.583 193.753 125.133 196.439 125.133 198.649V230.14C125.133 232.35 123.583 233.245 121.67 232.139L94.4019 216.385C92.4891 215.279 90.9385 212.593 90.9385 210.384V178.892Z" fill="#30868D"/>
<path d="M147.931 211.819C147.931 209.61 149.481 208.715 151.394 209.82L178.662 225.575C180.575 226.68 182.125 229.367 182.125 231.576V328.888C182.125 331.097 180.575 331.992 178.662 330.887L151.394 315.132C149.481 314.027 147.931 311.34 147.931 309.131V211.819Z" fill="#30868D"/>
<path d="M157.211 267.414C157.211 268.908 156.162 269.513 154.868 268.766C153.574 268.018 152.525 266.201 152.525 264.706C152.525 263.212 153.574 262.606 154.868 263.354C156.162 264.101 157.211 265.919 157.211 267.414Z" fill="#194649"/>
<path d="M125.133 313.124C125.133 324.029 117.478 328.447 108.036 322.991C98.5932 317.536 90.9385 304.272 90.9385 293.367C90.9385 282.462 98.5932 278.044 108.036 283.5C117.478 288.955 125.133 302.219 125.133 313.124Z" fill="#194649"/>
<path d="M112.235 306.216C112.235 309.312 110.062 310.566 107.381 309.017C104.7 307.468 102.526 303.703 102.526 300.606C102.526 297.51 104.7 296.256 107.381 297.805C110.062 299.354 112.235 303.12 112.235 306.216Z" fill="#F2F2F2"/>
<path d="M193.409 286.782V353.26L307.936 286.782V220.962L193.409 286.782Z" fill="#4E9EA5"/>
<path d="M193.409 220.961V286.781L307.936 220.961V155.142L193.409 220.961Z" fill="#5BB4BE"/>
<path d="M216.864 224.99C216.864 222.781 218.415 220.094 220.328 218.989L281.79 183.478C283.703 182.372 285.254 183.267 285.254 185.476V216.968C285.254 219.177 283.703 221.864 281.79 222.969L220.328 258.481C218.415 259.586 216.864 258.691 216.864 256.482V224.99Z" fill="#328B93"/>
<path d="M256.758 295.938C256.758 299.978 253.922 304.891 250.424 306.912C246.926 308.933 244.091 307.297 244.091 303.257C244.091 299.217 246.926 294.304 250.424 292.283C253.922 290.262 256.758 291.899 256.758 295.938Z" fill="#194649"/>
<path d="M253.305 308.232L247.427 304.964C245.224 303.74 246.736 298.541 249.822 296.726L251.222 295.903C252.135 295.366 253.014 295.265 253.668 295.622L263.655 301.074C264.325 301.44 264.703 302.255 264.703 303.335V334.147C264.703 335.251 263.928 336.595 262.972 337.147L256.769 340.731C254.856 341.836 253.305 340.941 253.305 338.732V308.232Z" fill="#194649"/>
<path d="M80.0528 212.742L29.5176 183.781L80.0528 154.82L130.222 184.439L80.0528 212.742Z" fill="#4E9EA5"/>
<path d="M142.165 377L103.525 355L142.165 333L180.525 355.5L142.165 377Z" fill="#62B3BA"/>
</g>
<path d="M170.84 143.185L166.768 143.406L162.836 145.674L159.757 143.895L173.902 135.734L176.981 137.513L170.253 141.395L170.375 141.465L175.116 141.045L182.289 140.58L185.773 142.593L175.197 143.198L172.053 150.999L168.427 148.904L170.84 143.185ZM190.925 145.569L182.251 150.573C181.251 151.15 180.697 151.696 180.589 152.21C180.494 152.717 180.906 153.236 181.825 153.767C182.743 154.297 183.641 154.535 184.519 154.481C185.411 154.419 186.356 154.1 187.356 153.523L196.03 148.519L199.028 150.251L190.679 155.068C189.638 155.668 188.632 156.132 187.659 156.459C186.714 156.786 185.768 156.965 184.822 156.996C183.89 157.019 182.952 156.898 182.006 156.633C181.074 156.359 180.102 155.93 179.089 155.345C178.076 154.76 177.333 154.198 176.861 153.66C176.401 153.114 176.192 152.572 176.233 152.034C176.287 151.488 176.591 150.938 177.145 150.384C177.726 149.831 178.537 149.254 179.577 148.654L187.927 143.837L190.925 145.569ZM202.074 152.011L208.941 155.979C210.116 156.657 210.697 157.375 210.683 158.132C210.683 158.896 210.135 159.594 209.041 160.225C208.501 160.537 207.967 160.759 207.44 160.891C206.927 161.031 206.427 161.102 205.941 161.101C205.468 161.109 205.009 161.054 204.563 160.937C204.144 160.82 203.746 160.66 203.368 160.457L203.246 160.527C203.584 160.722 203.874 160.96 204.117 161.241C204.374 161.53 204.522 161.842 204.563 162.177C204.616 162.505 204.535 162.848 204.319 163.207C204.116 163.573 203.711 163.932 203.103 164.282C202.549 164.602 201.928 164.851 201.239 165.03C200.577 165.21 199.901 165.311 199.212 165.334C198.524 165.357 197.848 165.302 197.186 165.17C196.538 165.029 195.95 164.806 195.424 164.502L187.928 160.171L202.074 152.011ZM193.601 160.454L197.147 162.502C197.552 162.736 197.977 162.857 198.423 162.865C198.896 162.873 199.335 162.76 199.74 162.527L200.429 162.129C200.834 161.895 201.024 161.646 200.997 161.381C200.997 161.115 200.794 160.866 200.389 160.632L196.844 158.583L193.601 160.454ZM199.357 157.134L202.416 158.901C202.821 159.135 203.253 
159.252 203.712 159.252C204.172 159.252 204.604 159.136 205.009 158.902L205.617 158.551C206.023 158.317 206.225 158.068 206.225 157.803C206.226 157.537 206.023 157.288 205.618 157.054L202.559 155.286L199.357 157.134ZM206.869 171.396C205.937 170.858 205.242 170.276 204.783 169.652C204.324 169.028 204.128 168.385 204.196 167.722C204.277 167.051 204.622 166.369 205.23 165.675C205.852 164.973 206.764 164.276 207.966 163.582C209.168 162.888 210.371 162.366 211.573 162.016C212.789 161.657 213.971 161.459 215.119 161.42C216.281 161.374 217.402 161.483 218.482 161.749C219.563 162.014 220.569 162.416 221.501 162.955C222.433 163.493 223.128 164.074 223.587 164.698C224.047 165.322 224.235 165.97 224.154 166.64C224.086 167.303 223.742 167.986 223.12 168.687C222.512 169.381 221.607 170.075 220.404 170.769C219.202 171.462 217.993 171.988 216.777 172.347C215.575 172.697 214.393 172.896 213.231 172.942C212.083 172.981 210.968 172.867 209.888 172.602C208.807 172.336 207.801 171.934 206.869 171.396ZM209.605 169.818C210.537 170.356 211.564 170.598 212.685 170.544C213.819 170.497 214.954 170.147 216.089 169.492L218.197 168.276C219.332 167.621 219.94 166.967 220.021 166.311C220.116 165.664 219.697 165.071 218.765 164.533C217.833 163.995 216.8 163.749 215.665 163.795C214.544 163.849 213.416 164.204 212.281 164.858L210.174 166.074C209.039 166.729 208.424 167.38 208.329 168.027C208.248 168.682 208.673 169.279 209.605 169.818Z" fill="#F2F2F2"/>
</svg>

After

Width:  |  Height:  |  Size: 8.6 KiB

118
docs/metrics.md Normal file
View File

@ -0,0 +1,118 @@
## Kubo metrics
By default, a Prometheus endpoint is exposed by Kubo at `http://127.0.0.1:5001/debug/metrics/prometheus`.
It includes default [Prometheus Go client metrics](https://prometheus.io/docs/guides/go-application/) + Kubo-specific metrics listed below.
### Table of Contents
- [DHT RPC](#dht-rpc)
- [Inbound RPC metrics](#inbound-rpc-metrics)
- [Outbound RPC metrics](#outbound-rpc-metrics)
- [Provide](#provide)
- [Legacy Provider](#legacy-provider)
- [DHT Provider](#dht-provider)
- [Gateway (`boxo/gateway`)](#gateway-boxogateway)
- [HTTP metrics](#http-metrics)
- [Blockstore cache metrics](#blockstore-cache-metrics)
- [Backend metrics](#backend-metrics)
- [Generic HTTP Servers](#generic-http-servers)
- [Core HTTP metrics](#core-http-metrics-ipfs_http_)
- [HTTP Server metrics](#http-server-metrics-http_server_)
- [OpenTelemetry Metadata](#opentelemetry-metadata)
> [!WARNING]
> This documentation is incomplete. For an up-to-date list of metrics available at daemon startup, see [test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile](https://github.com/ipfs/kubo/blob/master/test/sharness/t0119-prometheus-data/prometheus_metrics_added_by_measure_profile).
>
> Additional metrics may appear during runtime as some components (like boxo/gateway) register metrics only after their first event occurs (e.g., HTTP request/response).
## DHT RPC
Metrics from `go-libp2p-kad-dht` for DHT RPC operations:
### Inbound RPC metrics
- `rpc_inbound_messages_total` - Counter: total messages received per RPC
- `rpc_inbound_message_errors_total` - Counter: total errors for received messages
- `rpc_inbound_bytes_[bucket|sum|count]` - Histogram: distribution of received bytes per RPC
- `rpc_inbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for inbound RPCs
### Outbound RPC metrics
- `rpc_outbound_messages_total` - Counter: total messages sent per RPC
- `rpc_outbound_message_errors_total` - Counter: total errors for sent messages
- `rpc_outbound_requests_total` - Counter: total requests sent
- `rpc_outbound_request_errors_total` - Counter: total errors for sent requests
- `rpc_outbound_bytes_[bucket|sum|count]` - Histogram: distribution of sent bytes per RPC
- `rpc_outbound_request_latency_[bucket|sum|count]` - Histogram: latency distribution for outbound RPCs
## Provide
### Legacy Provider
Metrics for the legacy provider system when `Provide.DHT.SweepEnabled=false`:
- `provider_reprovider_provide_count` - Counter: total successful provide operations since node startup
- `provider_reprovider_reprovide_count` - Counter: total reprovide sweep operations since node startup
### DHT Provider
Metrics for the DHT provider system when `Provide.DHT.SweepEnabled=true`:
- `total_provide_count_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)
> [!NOTE]
> These metrics are exposed by [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/). You can enable debug logging for DHT provider activity with `GOLOG_LOG_LEVEL=dht/provider=debug`.
## Gateway (`boxo/gateway`)
> [!TIP]
> These metrics are limited to [IPFS Gateway](https://specs.ipfs.tech/http-gateways/) endpoints. For general HTTP metrics across all endpoints, consider using a reverse proxy.
Gateway metrics appear after the first HTTP request is processed:
### HTTP metrics
- `ipfs_http_gw_responses_total{code}` - Counter: total HTTP responses by status code
- `ipfs_http_gw_retrieval_timeouts_total{code,truncated}` - Counter: requests that timed out during content retrieval
- `ipfs_http_gw_concurrent_requests` - Gauge: number of requests currently being processed
### Blockstore cache metrics
- `ipfs_http_blockstore_cache_hit` - Counter: global block cache hits
- `ipfs_http_blockstore_cache_requests` - Counter: global block cache requests
### Backend metrics
- `ipfs_gw_backend_api_call_duration_seconds_[bucket|sum|count]{backend_method}` - Histogram: time spent in IPFSBackend API calls
## Generic HTTP Servers
> [!TIP]
> The metrics below are not very useful and exist mostly for historical reasons. If you need non-gateway HTTP metrics, it's better to put a reverse proxy in front of Kubo and use its metrics.
### Core HTTP metrics (`ipfs_http_*`)
Prometheus metrics for the HTTP API exposed at port 5001:
- `ipfs_http_requests_total{method,code,handler}` - Counter: total HTTP requests (Legacy - new metrics are provided by boxo/gateway for gateway traffic)
- `ipfs_http_request_duration_seconds[_sum|_count]{handler}` - Summary: request processing duration
- `ipfs_http_request_size_bytes[_sum|_count]{handler}` - Summary: request body sizes
- `ipfs_http_response_size_bytes[_sum|_count]{handler}` - Summary: response body sizes
### HTTP Server metrics (`http_server_*`)
Additional HTTP instrumentation for all handlers (Gateway, API commands, etc.):
- `http_server_request_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of request body sizes
- `http_server_request_duration_seconds_[bucket|count|sum]` - Histogram: distribution of request processing times
- `http_server_response_body_size_bytes_[bucket|count|sum]` - Histogram: distribution of response body sizes
These metrics are automatically added to Gateway handlers, Hostname Gateway, Libp2p Gateway, and API command handlers.
## OpenTelemetry Metadata
Kubo uses Prometheus for metrics collection for historical reasons, but OpenTelemetry metrics are automatically exposed through the same Prometheus endpoint. These metadata metrics provide context about the instrumentation:
- `otel_scope_info` - Information about instrumentation libraries producing metrics
- `target_info` - Service metadata including version and instance information

View File

@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
// +build !nofuse,!openbsd,!netbsd,!plan9
package ipns

View File

@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
// +build !nofuse,!openbsd,!netbsd,!plan9
// package fuse/ipns implements a fuse filesystem that interfaces
// with ipns, the naming system for ipfs.

View File

@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
// +build !nofuse,!openbsd,!netbsd,!plan9
package ipns

View File

@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse
// +build linux darwin freebsd netbsd openbsd
// +build !nofuse
package ipns

View File

@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
// +build !nofuse,!openbsd,!netbsd,!plan9
package mfs

View File

@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse
// +build linux darwin freebsd netbsd openbsd
// +build !nofuse
package mfs

View File

@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd || netbsd || openbsd) && !nofuse
// +build linux darwin freebsd netbsd openbsd
// +build !nofuse
package mfs

View File

@ -1,5 +1,4 @@
//go:build !nofuse && !windows && !openbsd && !netbsd && !plan9
// +build !nofuse,!windows,!openbsd,!netbsd,!plan9
package mount

View File

@ -1,5 +1,4 @@
//go:build !nofuse
// +build !nofuse
//go:build !nofuse && darwin
package node

View File

@ -1,5 +1,4 @@
//go:build !windows && nofuse
// +build !windows,nofuse
package node

View File

@ -1,5 +1,4 @@
//go:build (!nofuse && openbsd) || (!nofuse && netbsd) || (!nofuse && plan9)
// +build !nofuse,openbsd !nofuse,netbsd !nofuse,plan9
package node

View File

@ -1,5 +1,4 @@
//go:build !openbsd && !nofuse && !netbsd && !plan9
// +build !openbsd,!nofuse,!netbsd,!plan9
package node

View File

@ -1,5 +1,4 @@
//go:build !windows && !openbsd && !netbsd && !plan9 && !nofuse
// +build !windows,!openbsd,!netbsd,!plan9,!nofuse
package node

View File

@ -1,5 +1,4 @@
//go:build !nofuse && !openbsd && !netbsd && !plan9
// +build !nofuse,!openbsd,!netbsd,!plan9
package readonly

View File

@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd) && !nofuse
// +build linux darwin freebsd
// +build !nofuse
package readonly

View File

@ -1,6 +1,4 @@
//go:build (linux || darwin || freebsd) && !nofuse
// +build linux darwin freebsd
// +build !nofuse
package readonly

View File

@ -165,7 +165,7 @@ func Descendants(ctx context.Context, getLinks dag.GetLinks, set *cid.Set, roots
}
verboseCidError := func(err error) error {
if strings.Contains(err.Error(), verifcid.ErrBelowMinimumHashLength.Error()) ||
if strings.Contains(err.Error(), verifcid.ErrDigestTooSmall.Error()) ||
strings.Contains(err.Error(), verifcid.ErrPossiblyInsecureHashFunction.Error()) {
err = fmt.Errorf("\"%s\"\nPlease run 'ipfs pin verify'"+ // nolint
" to list insecure hashes. If you want to read them,"+

Some files were not shown because too many files have changed in this diff Show More