Merge pull request #11064 from ipfs/release-v0.39.0

Release v0.39.0
Marcin Rataj 2025-11-27 02:56:04 +01:00 committed by GitHub
commit 2896aed9f4
80 changed files with 5840 additions and 490 deletions

.github/legacy/Dockerfile.goipfs-stub (new file, 26 lines)

@ -0,0 +1,26 @@
# syntax=docker/dockerfile:1
# Stub Dockerfile for the deprecated 'ipfs/go-ipfs' image name.
# This image redirects users to the new 'ipfs/kubo' name.
FROM busybox:stable-glibc
# Copy stub entrypoint that displays deprecation message
COPY .github/legacy/goipfs_stub.sh /usr/local/bin/ipfs
# Make it executable
RUN chmod +x /usr/local/bin/ipfs
# Use the same ports as the real image for compatibility
EXPOSE 4001 4001/udp 5001 8080 8081
# Create ipfs user for consistency
ENV IPFS_PATH=/data/ipfs
RUN mkdir -p $IPFS_PATH \
&& adduser -D -h $IPFS_PATH -u 1000 -G users ipfs \
&& chown ipfs:users $IPFS_PATH
# Run as ipfs user
USER ipfs
# The stub script will run and exit with an error message
ENTRYPOINT ["/usr/local/bin/ipfs"]
CMD ["daemon"]

.github/legacy/goipfs_stub.sh (new executable file, 20 lines)

@ -0,0 +1,20 @@
#!/bin/sh
# Stub script for the deprecated 'ipfs/go-ipfs' Docker image.
# This tells users to switch to 'ipfs/kubo'.
cat >&2 <<'EOF'
ERROR: The name 'go-ipfs' is no longer used.
Please update your Docker scripts to use 'ipfs/kubo' instead of 'ipfs/go-ipfs'.
For example:
docker pull ipfs/kubo:release
More information:
- https://github.com/ipfs/kubo#docker
- https://hub.docker.com/r/ipfs/kubo
- https://docs.ipfs.tech/install/run-ipfs-inside-docker/
EOF
exit 1
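
For illustration, once this stub is published under the legacy name, running it prints the notice above and exits with a non-zero status (hypothetical session; the `:latest` tag is assumed):

```console
$ docker run --rm ipfs/go-ipfs:latest
ERROR: The name 'go-ipfs' is no longer used.
Please update your Docker scripts to use 'ipfs/kubo' instead of 'ipfs/go-ipfs'.
...
$ echo $?
1
```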

View File

@ -39,7 +39,8 @@ jobs:
timeout-minutes: 15
env:
IMAGE_NAME: ipfs/kubo
LEGACY_IMAGE_NAME: ipfs/go-ipfs
outputs:
tags: ${{ steps.tags.outputs.value }}
steps:
- name: Check out the repo
uses: actions/checkout@v5
@ -140,3 +141,52 @@ jobs:
cache-to: |
type=gha,mode=max
type=registry,ref=${{ env.IMAGE_NAME }}:buildcache,mode=max
# Build and push stub image to the legacy ipfs/go-ipfs name
# This points users to ipfs/kubo instead
legacy-name:
needs: docker-hub
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
name: Push stub to legacy ipfs/go-ipfs name
runs-on: ubuntu-latest
timeout-minutes: 5
env:
LEGACY_IMAGE_NAME: ipfs/go-ipfs
steps:
- name: Check out the repo
uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ vars.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert tags to legacy image name
id: legacy_tags
run: |
TAGS="${{ github.event.inputs.tags || needs.docker-hub.outputs.tags }}"
if ! echo "$TAGS" | grep -q "kubo"; then
echo "ERROR: Tags must contain kubo image name"
exit 1
fi
echo "value<<EOF" >> $GITHUB_OUTPUT
echo "$TAGS" | sed "s|ipfs/kubo|$LEGACY_IMAGE_NAME|g" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
shell: bash
- if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true'
name: Build and push legacy stub image
uses: docker/build-push-action@v6
with:
platforms: linux/amd64,linux/arm/v7,linux/arm64/v8
context: .
push: true
file: ./.github/legacy/Dockerfile.goipfs-stub
tags: ${{ steps.legacy_tags.outputs.value }}
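
As a sanity check of the tag conversion step above, the `sed` expression only swaps the image name inside each tag; a sketch with assumed example tags:

```console
$ TAGS="ipfs/kubo:latest,ipfs/kubo:v0.39.0"
$ echo "$TAGS" | sed "s|ipfs/kubo|ipfs/go-ipfs|g"
ipfs/go-ipfs:latest,ipfs/go-ipfs:v0.39.0
```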

View File

@ -109,13 +109,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: gateway-conformance.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: gateway-conformance.json
path: output.json
@ -214,13 +214,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: gateway-conformance-libp2p.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: gateway-conformance-libp2p.json
path: output.json

View File

@ -78,7 +78,7 @@ jobs:
output: test/unit/gotest.junit.xml
if: failure() || success()
- name: Archive the JUnit XML report
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: unit
path: test/unit/gotest.junit.xml
@ -91,7 +91,7 @@ jobs:
output: test/unit/gotest.html
if: failure() || success()
- name: Archive the HTML report
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: html
path: test/unit/gotest.html

View File

@ -37,7 +37,7 @@ jobs:
with:
go-version-file: 'go.mod'
- run: make build
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v5
with:
name: kubo
path: cmd/ipfs/ipfs
@ -49,10 +49,10 @@ jobs:
run:
shell: bash
steps:
- uses: actions/setup-node@v5
- uses: actions/setup-node@v6
with:
node-version: lts/*
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v6
with:
name: kubo
path: cmd/ipfs
@ -84,10 +84,10 @@ jobs:
run:
shell: bash
steps:
- uses: actions/setup-node@v5
- uses: actions/setup-node@v6
with:
node-version: 20.x
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v6
with:
name: kubo
path: cmd/ipfs

View File

@ -88,7 +88,7 @@ jobs:
destination: sharness.html
- name: Upload one-page HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: sharness.html
path: kubo/test/sharness/test-results/sharness.html
@ -108,7 +108,7 @@ jobs:
destination: sharness-html/
- name: Upload full HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: sharness-html
path: kubo/test/sharness/test-results/sharness-html

View File

@ -22,7 +22,7 @@ jobs:
- uses: ipfs/start-ipfs-daemon-action@v1
with:
args: --init --init-profile=flatfs,server --enable-gc=false
- uses: actions/setup-node@v5
- uses: actions/setup-node@v6
with:
node-version: 14
- name: Sync the latest 5 github releases

View File

@ -77,7 +77,7 @@ jobs:
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: ${{ matrix.os }}-test-results
path: |

View File

@ -1,5 +1,6 @@
# Kubo Changelogs
- [v0.39](docs/changelogs/v0.39.md)
- [v0.38](docs/changelogs/v0.38.md)
- [v0.37](docs/changelogs/v0.37.md)
- [v0.36](docs/changelogs/v0.36.md)

View File

@ -191,13 +191,13 @@ $ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION
To download a given build of a version:
```console
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-386.tar.gz # darwin 32-bit build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin 64-bit build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd 64-bit build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-386.tar.gz # linux 32-bit build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux 64-bit build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm.tar.gz # linux arm build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows 64-bit build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-arm64.tar.gz # darwin arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-riscv64.tar.gz # linux riscv64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm64.tar.gz # linux arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows amd64 build
```
### Unofficial Linux packages

View File

@ -50,6 +50,6 @@ else
unset IPFS_SWARM_KEY_FILE
fi
find /container-init.d -maxdepth 1 -type f -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
find /container-init.d -maxdepth 1 \( -type f -o -type l \) -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
exec ipfs "$@"

View File

@ -29,12 +29,10 @@ GIT_BRANCH=${3:-$(git symbolic-ref -q --short HEAD || echo "unknown")}
GIT_TAG=${4:-$(git describe --tags --exact-match 2> /dev/null || echo "")}
IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo}
LEGACY_IMAGE_NAME=${LEGACY_IMAGE_NAME:-ipfs/go-ipfs}
echoImageName () {
local IMAGE_TAG=$1
echo "$IMAGE_NAME:$IMAGE_TAG"
echo "$LEGACY_IMAGE_NAME:$IMAGE_TAG"
}
if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then

View File

@ -1,10 +1,19 @@
#!/bin/zsh
#!/bin/bash
#
# Invocation: mkreleaselog [FIRST_REF [LAST_REF]]
#
# Generates release notes with contributor statistics, deduplicating by GitHub handle.
# GitHub handles are resolved from:
# 1. GitHub noreply emails (user@users.noreply.github.com)
# 2. Merge commit messages (Merge pull request #N from user/branch)
# 3. GitHub API via gh CLI (for squash merges)
#
# Results are cached in ~/.cache/mkreleaselog/github-handles.json
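#
# The cache file is a flat JSON object mapping author emails to GitHub handles,
# for example (illustrative values):
#   {"12345678+alice@users.noreply.github.com": "alice", "bob@example.com": "bob"}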
set -euo pipefail
export GO111MODULE=on
export GOPATH="$(go env GOPATH)"
GOPATH="$(go env GOPATH)"
export GOPATH
# List of PCRE regular expressions to match "included" modules.
INCLUDE_MODULES=(
@ -15,10 +24,15 @@ INCLUDE_MODULES=(
"^github.com/multiformats/"
"^github.com/filecoin-project/"
"^github.com/ipfs-shipyard/"
"^github.com/ipshipyard/"
"^github.com/probe-lab/"
# Authors of personal modules used by go-ipfs that should be mentioned in the
# release notes.
"^github.com/whyrusleeping/"
"^github.com/gammazero/"
"^github.com/Jorropo/"
"^github.com/guillaumemichel/"
"^github.com/Kubuxu/"
"^github.com/jbenet/"
"^github.com/Stebalien/"
@ -48,15 +62,348 @@ IGNORE_FILES=(
)
##########################################################################################
# GitHub Handle Resolution Infrastructure
##########################################################################################
# Cache location following XDG spec
GITHUB_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/mkreleaselog"
GITHUB_CACHE_FILE="$GITHUB_CACHE_DIR/github-handles.json"
# Timeout for gh CLI commands (seconds)
GH_TIMEOUT=10
# Associative array for email -> github handle mapping (runtime cache)
declare -A EMAIL_TO_GITHUB
# Check if gh CLI is available and authenticated
gh_available() {
command -v gh >/dev/null 2>&1 && gh auth status >/dev/null 2>&1
}
# Load cached email -> github handle mappings from disk
load_github_cache() {
EMAIL_TO_GITHUB=()
if [[ ! -f "$GITHUB_CACHE_FILE" ]]; then
return 0
fi
# Validate JSON before loading
if ! jq -e '.' "$GITHUB_CACHE_FILE" >/dev/null 2>&1; then
msg "Warning: corrupted cache file, ignoring"
return 0
fi
local email handle
while IFS=$'\t' read -r email handle; do
# Validate handle format (alphanumeric, hyphens, max 39 chars)
if [[ -n "$email" && -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
EMAIL_TO_GITHUB["$email"]="$handle"
fi
done < <(jq -r 'to_entries[] | "\(.key)\t\(.value)"' "$GITHUB_CACHE_FILE" 2>/dev/null)
msg "Loaded ${#EMAIL_TO_GITHUB[@]} cached GitHub handle mappings"
}
# Save email -> github handle mappings to disk (atomic write)
save_github_cache() {
if [[ ${#EMAIL_TO_GITHUB[@]} -eq 0 ]]; then
return 0
fi
mkdir -p "$GITHUB_CACHE_DIR"
local tmp_file
tmp_file="$(mktemp "$GITHUB_CACHE_DIR/cache.XXXXXX")" || return 1
# Build JSON from associative array
{
echo "{"
local first=true
local key
for key in "${!EMAIL_TO_GITHUB[@]}"; do
if [[ "$first" == "true" ]]; then
first=false
else
echo ","
fi
# Escape special characters in email for JSON
printf ' %s: %s' "$(jq -n --arg e "$key" '$e')" "$(jq -n --arg h "${EMAIL_TO_GITHUB[$key]}" '$h')"
done
echo
echo "}"
} > "$tmp_file"
# Validate before replacing
if jq -e '.' "$tmp_file" >/dev/null 2>&1; then
mv "$tmp_file" "$GITHUB_CACHE_FILE"
msg "Saved ${#EMAIL_TO_GITHUB[@]} GitHub handle mappings to cache"
else
rm -f "$tmp_file"
msg "Warning: failed to save cache (invalid JSON)"
fi
}
# Extract GitHub handle from email if it's a GitHub noreply address
# Handles: user@users.noreply.github.com and 12345678+user@users.noreply.github.com
extract_handle_from_noreply() {
local email="$1"
if [[ "$email" =~ ^([0-9]+\+)?([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)@users\.noreply\.github\.com$ ]]; then
echo "${BASH_REMATCH[2]}"
return 0
fi
return 1
}
# Extract GitHub handle from merge commit subject
# Handles: "Merge pull request #123 from username/branch"
extract_handle_from_merge_commit() {
local subject="$1"
if [[ "$subject" =~ ^Merge\ pull\ request\ \#[0-9]+\ from\ ([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)/.*$ ]]; then
echo "${BASH_REMATCH[1]}"
return 0
fi
return 1
}
# Extract PR number from commit subject
# Handles: "Subject (#123)" and "Merge pull request #123 from"
extract_pr_number() {
local subject="$1"
if [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
echo "${BASH_REMATCH[1]}"
return 0
elif [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
echo "${BASH_REMATCH[1]}"
return 0
fi
return 1
}
# Query GitHub API for PR author (with timeout and error handling)
query_pr_author() {
local gh_repo="$1" # e.g., "ipfs/kubo"
local pr_num="$2"
if ! gh_available; then
return 1
fi
local handle
handle="$(timeout "$GH_TIMEOUT" gh pr view "$pr_num" --repo "$gh_repo" --json author -q '.author.login' 2>/dev/null)" || return 1
# Validate handle format
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
echo "$handle"
return 0
fi
return 1
}
# Query GitHub API for commit author (fallback when no PR available)
query_commit_author() {
local gh_repo="$1" # e.g., "ipfs/kubo"
local commit_sha="$2"
if ! gh_available; then
return 1
fi
local handle
handle="$(timeout "$GH_TIMEOUT" gh api "/repos/$gh_repo/commits/$commit_sha" --jq '.author.login // empty' 2>/dev/null)" || return 1
# Validate handle format
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
echo "$handle"
return 0
fi
return 1
}
# Resolve email to GitHub handle using all available methods
# Args: email, commit_hash (optional), repo_dir (optional), gh_repo (optional)
resolve_github_handle() {
local email="$1"
local commit="${2:-}"
local repo_dir="${3:-}"
local gh_repo="${4:-}"
# Skip empty emails
[[ -z "$email" ]] && return 1
# Check runtime cache first
if [[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]]; then
echo "${EMAIL_TO_GITHUB[$email]}"
return 0
fi
local handle=""
# Method 1: Extract from noreply email
if handle="$(extract_handle_from_noreply "$email")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
echo "$handle"
return 0
fi
# Method 2: Look at commit message for merge commit pattern
if [[ -n "$commit" && -n "$repo_dir" ]]; then
local subject
subject="$(git -C "$repo_dir" log -1 --format='%s' "$commit" 2>/dev/null)" || true
if [[ -n "$subject" ]]; then
if handle="$(extract_handle_from_merge_commit "$subject")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
echo "$handle"
return 0
fi
# Method 3: Query GitHub API for PR author
if [[ -n "$gh_repo" ]]; then
local pr_num
if pr_num="$(extract_pr_number "$subject")"; then
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
echo "$handle"
return 0
fi
fi
fi
fi
fi
return 1
}
# Build GitHub handle mappings for all commits in a range
# This does a single pass to collect PR numbers, then batch queries them
build_github_mappings() {
local module="$1"
local start="$2"
local end="${3:-HEAD}"
local repo
repo="$(strip_version "$module")"
local dir
local gh_repo=""
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
dir="$ROOT_DIR"
else
dir="$GOPATH/src/$repo"
fi
# Extract gh_repo for API calls (e.g., "ipfs/kubo" from "github.com/ipfs/kubo")
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
gh_repo="${BASH_REMATCH[1]}"
fi
msg "Building GitHub handle mappings for $module..."
# Collect all unique emails and their commit context
declare -A email_commits=()
local hash email subject
while IFS=$'\t' read -r hash email subject; do
[[ -z "$email" ]] && continue
# Skip if already resolved
[[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]] && continue
# Try to resolve without API first
local handle=""
# Method 1: noreply email
if handle="$(extract_handle_from_noreply "$email")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
continue
fi
# Method 2: merge commit message
if handle="$(extract_handle_from_merge_commit "$subject")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
continue
fi
# Store for potential API lookup
if [[ -z "${email_commits[$email]:-}" ]]; then
email_commits["$email"]="$hash"
fi
done < <(git -C "$dir" log --format='tformat:%H%x09%aE%x09%s' --no-merges "$start..$end" 2>/dev/null)
# API batch lookup for remaining emails (if gh is available)
if gh_available && [[ -n "$gh_repo" && ${#email_commits[@]} -gt 0 ]]; then
msg "Querying GitHub API for ${#email_commits[@]} unknown contributors..."
local key
for key in "${!email_commits[@]}"; do
# Skip if already resolved
[[ -n "${EMAIL_TO_GITHUB[$key]:-}" ]] && continue
local commit_hash="${email_commits[$key]}"
local subj handle
subj="$(git -C "$dir" log -1 --format='%s' "$commit_hash" 2>/dev/null)" || true
# Try PR author lookup first (cheaper API call)
local pr_num
if pr_num="$(extract_pr_number "$subj")"; then
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
EMAIL_TO_GITHUB["$key"]="$handle"
continue
fi
fi
# Fallback: commit author API (works for any commit)
if handle="$(query_commit_author "$gh_repo" "$commit_hash")"; then
EMAIL_TO_GITHUB["$key"]="$handle"
fi
done
fi
}
##########################################################################################
# Original infrastructure with modifications
##########################################################################################
build_include_regex() {
local result=""
local mod
for mod in "${INCLUDE_MODULES[@]}"; do
if [[ -n "$result" ]]; then
result="$result|$mod"
else
result="$mod"
fi
done
echo "($result)"
}
build_exclude_regex() {
local result=""
local mod
for mod in "${EXCLUDE_MODULES[@]}"; do
if [[ -n "$result" ]]; then
result="$result|$mod"
else
result="$mod"
fi
done
if [[ -n "$result" ]]; then
echo "($result)"
else
echo '$^' # match nothing
fi
}
if [[ ${#INCLUDE_MODULES[@]} -gt 0 ]]; then
INCLUDE_REGEX="(${$(printf "|%s" "${INCLUDE_MODULES[@]}"):1})"
INCLUDE_REGEX="$(build_include_regex)"
else
INCLUDE_REGEX="" # "match anything"
fi
if [[ ${#EXCLUDE_MODULES[@]} -gt 0 ]]; then
EXCLUDE_REGEX="(${$(printf "|%s" "${EXCLUDE_MODULES[@]}"):1})"
EXCLUDE_REGEX="$(build_exclude_regex)"
else
EXCLUDE_REGEX='$^' # "match nothing"
fi
@ -71,8 +418,6 @@ NL=$'\n'
ROOT_DIR="$(git rev-parse --show-toplevel)"
alias jq="jq --unbuffered"
msg() {
echo "$*" >&2
}
@ -80,11 +425,21 @@ msg() {
statlog() {
local module="$1"
local rpath
local gh_repo=""
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
rpath="$ROOT_DIR"
else
rpath="$GOPATH/src/$(strip_version "$module")"
fi
# Extract gh_repo for API calls
local repo
repo="$(strip_version "$module")"
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
gh_repo="${BASH_REMATCH[1]}"
fi
local start="${2:-}"
local end="${3:-HEAD}"
local mailmap_file="$rpath/.mailmap"
@ -93,18 +448,21 @@ statlog() {
fi
local stack=()
git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}" | while read -r line; do
local line
while read -r line; do
if [[ -n "$line" ]]; then
stack+=("$line")
continue
fi
local changes
read -r changes
changed=0
insertions=0
deletions=0
while read count event; do
local changed=0
local insertions=0
local deletions=0
local count event
while read -r count event; do
if [[ "$event" =~ ^file ]]; then
changed=$count
elif [[ "$event" =~ ^insertion ]]; then
@ -117,27 +475,32 @@ statlog() {
fi
done<<<"${changes//,/$NL}"
local author
for author in "${stack[@]}"; do
local hash name email
IFS=$'\t' read -r hash name email <<<"$author"
# Resolve GitHub handle
local github_handle=""
github_handle="$(resolve_github_handle "$email" "$hash" "$rpath" "$gh_repo")" || true
jq -n \
--arg "hash" "$hash" \
--arg "name" "$name" \
--arg "email" "$email" \
--arg "github" "$github_handle" \
--argjson "changed" "$changed" \
--argjson "insertions" "$insertions" \
--argjson "deletions" "$deletions" \
'{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
'{Commit: $hash, Author: $name, Email: $email, GitHub: $github, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
done
stack=()
done
done < <(git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}")
}
# Returns a stream of deps changed between $1 and $2.
dep_changes() {
{
<"$1"
<"$2"
} | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
cat "$1" "$2" | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
}
# resolve_commits resolves a git ref for each version.
@ -165,12 +528,11 @@ ignored_commit() {
# Generate a release log for a range of commits in a single repo.
release_log() {
setopt local_options BASH_REMATCH
local module="$1"
local start="$2"
local end="${3:-HEAD}"
local repo="$(strip_version "$1")"
local repo
repo="$(strip_version "$1")"
local dir
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
dir="$ROOT_DIR"
@ -178,28 +540,25 @@ release_log() {
dir="$GOPATH/src/$repo"
fi
local commit pr
git -C "$dir" log \
--format='tformat:%H %s' \
--first-parent \
"$start..$end" |
while read commit subject; do
# Skip commits that only touch ignored files.
if ignored_commit "$dir" "$commit"; then
continue
fi
local commit subject
while read -r commit subject; do
# Skip commits that only touch ignored files.
if ignored_commit "$dir" "$commit"; then
continue
fi
if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then
local prnum="${BASH_REMATCH[2]}"
local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then
local prnum="${BASH_REMATCH[2]}"
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
else
printf -- "- %s\n" "$subject"
fi
done
if [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
local prnum="${BASH_REMATCH[1]}"
local desc
desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
elif [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
local prnum="${BASH_REMATCH[1]}"
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
else
printf -- "- %s\n" "$subject"
fi
done < <(git -C "$dir" log --format='tformat:%H %s' --first-parent "$start..$end")
}
indent() {
@ -211,7 +570,8 @@ mod_deps() {
}
ensure() {
local repo="$(strip_version "$1")"
local repo
repo="$(strip_version "$1")"
local commit="$2"
local rpath
if [[ "$1" == "github.com/ipfs/kubo" ]]; then
@ -232,14 +592,27 @@ ensure() {
git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1
}
# Summarize stats, grouping by GitHub handle (with fallback to email for dedup)
statsummary() {
jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' |
jq '. + {Lines: (.Deletions + .Insertions)}'
jq -s '
# Group by GitHub handle if available, otherwise by email
group_by(if .GitHub != "" then .GitHub else .Email end)[] |
{
# Use first non-empty GitHub handle, or fall back to Author name
Author: .[0].Author,
GitHub: (map(select(.GitHub != "")) | .[0].GitHub // ""),
Email: .[0].Email,
Commits: (. | length),
Insertions: (map(.Insertions) | add),
Deletions: (map(.Deletions) | add),
Files: (map(.Files) | add)
}
' | jq '. + {Lines: (.Deletions + .Insertions)}'
}
strip_version() {
local repo="$1"
if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then
if [[ "$repo" =~ .*/v[0-9]+$ ]]; then
repo="$(dirname "$repo")"
fi
echo "$repo"
@ -248,16 +621,24 @@ strip_version() {
recursive_release_log() {
local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}"
local end="${2:-$(git rev-parse HEAD)}"
local repo_root="$(git rev-parse --show-toplevel)"
local module="$(go list -m)"
local dir="$(go list -m -f '{{.Dir}}')"
local repo_root
repo_root="$(git rev-parse --show-toplevel)"
local module
module="$(go list -m)"
local dir
dir="$(go list -m -f '{{.Dir}}')"
# Load cached GitHub handle mappings
load_github_cache
# Kubo can be run from any directory, dependencies still use GOPATH
(
local result=0
local workspace="$(mktemp -d)"
trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT
local workspace
workspace="$(mktemp -d)"
# shellcheck disable=SC2064
trap "rm -rf '$workspace'" INT TERM EXIT
cd "$workspace"
echo "Computing old deps..." >&2
@ -272,6 +653,9 @@ recursive_release_log() {
printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2
# Pre-build GitHub mappings for main module
build_github_mappings "$module" "$start" "$end"
echo "### 📝 Changelog"
echo
echo "<details><summary>Full Changelog</summary>"
@ -282,24 +666,26 @@ recursive_release_log() {
statlog "$module" "$start" "$end" > statlog.json
dep_changes old_deps.json new_deps.json |
local dep_module new new_ref old old_ref
while read -r dep_module new new_ref old old_ref; do
if ! ensure "$dep_module" "$new_ref"; then
result=1
local changelog="failed to fetch repo"
else
# Pre-build GitHub mappings for dependency
build_github_mappings "$dep_module" "$old_ref" "$new_ref"
statlog "$dep_module" "$old_ref" "$new_ref" >> statlog.json
local changelog
changelog="$(release_log "$dep_module" "$old_ref" "$new_ref")"
fi
if [[ -n "$changelog" ]]; then
printf -- "- %s (%s -> %s):\n" "$dep_module" "$old" "$new"
echo "$changelog" | indent
fi
done < <(dep_changes old_deps.json new_deps.json |
jq --arg inc "$INCLUDE_REGEX" --arg exc "$EXCLUDE_REGEX" \
'select(.Path | test($inc)) | select(.Path | test($exc) | not)' |
# Compute changelogs
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' |
while read module new new_ref old old_ref; do
if ! ensure "$module" "$new_ref"; then
result=1
local changelog="failed to fetch repo"
else
statlog "$module" "$old_ref" "$new_ref" >> statlog.json
local changelog="$(release_log "$module" "$old_ref" "$new_ref")"
fi
if [[ -n "$changelog" ]]; then
printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new"
echo "$changelog" | indent
fi
done
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"')
echo
echo "</details>"
@ -311,8 +697,18 @@ recursive_release_log() {
echo "|-------------|---------|---------|---------------|"
statsummary <statlog.json |
jq -s 'sort_by(.Lines) | reverse | .[]' |
jq -r '"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"'
return "$status"
jq -r '
if .GitHub != "" then
"| [@\(.GitHub)](https://github.com/\(.GitHub)) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
else
"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
end
'
# Save cache before exiting
save_github_cache
return "$result"
)
}
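
A typical invocation, following the usage note at the top of the script (the script path, refs, and output path below are placeholders):

```console
$ ./bin/mkreleaselog v0.38.0 v0.39.0 > docs/changelogs/v0.39.md
```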

View File

@ -515,7 +515,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
}
//nolint:staticcheck // intentionally checking deprecated fields
if !cfg.Reprovider.Interval.IsDefault() || !cfg.Reprovider.Strategy.IsDefault() {
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.DHT.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
}
// Check for deprecated "flat" strategy (should have been migrated to "all")
if cfg.Provide.Strategy.WithDefault("") == "flat" {

View File

@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"net"
"net/http"
"os"
@ -33,6 +34,7 @@ import (
"github.com/ipfs/kubo/repo"
"github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/tracing"
"github.com/libp2p/go-libp2p/gologshim"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
@ -50,6 +52,17 @@ var (
tracer trace.Tracer
)
func init() {
// Set go-log's slog handler as the application-wide default.
// This ensures all slog-based logging uses go-log's formatting.
slog.SetDefault(slog.New(logging.SlogHandler()))
// Wire go-log's slog bridge to go-libp2p's gologshim.
// This provides go-libp2p loggers with the "logger" attribute
// for per-subsystem level control (e.g., `ipfs log level libp2p-swarm debug`).
gologshim.SetDefaultHandler(logging.SlogHandler())
}
// declared as a var for testing purposes.
var dnsResolver = madns.DefaultResolver

View File

@ -12,8 +12,9 @@ const (
DefaultDiagnosticServiceURL = "https://check.ipfs.network"
// Gateway limit defaults from boxo
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
DefaultMaxRangeRequestFileSize = 0 // 0 means no limit
)
type GatewaySpec struct {
@ -100,6 +101,12 @@ type Gateway struct {
// A value of 0 disables the limit.
MaxConcurrentRequests *OptionalInteger `json:",omitempty"`
// MaxRangeRequestFileSize limits the maximum file size for HTTP range requests.
// Range requests for files larger than this limit return 501 Not Implemented.
// This protects against CDN issues with large file range requests and prevents
// excessive bandwidth consumption. A value of 0 disables the limit.
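// Example (illustrative): setting "MaxRangeRequestFileSize": "4GiB" under Gateway
// allows range requests only for files up to 4 GiB; larger files return 501.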
MaxRangeRequestFileSize *OptionalBytes `json:",omitempty"`
// DiagnosticServiceURL is the URL for a service to diagnose CID retrievability issues.
// When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID"
// button will be shown that links to this service with the CID appended as ?cid=<CID-to-diagnose>.

View File

@ -16,8 +16,10 @@ const (
DefaultUnixFSRawLeaves = false
DefaultUnixFSChunker = "size-262144"
DefaultHashFunction = "sha2-256"
DefaultFastProvideRoot = true
DefaultFastProvideWait = false
DefaultUnixFSHAMTDirectorySizeThreshold = "256KiB" // https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26
DefaultUnixFSHAMTDirectorySizeThreshold = 262144 // 256KiB - https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26
// DefaultBatchMaxNodes controls the maximum number of nodes in a
// write-batch. The total size of the batch is limited by
@ -45,9 +47,11 @@ type Import struct {
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalString
UnixFSHAMTDirectorySizeThreshold OptionalBytes
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
}
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.

View File

@ -322,7 +322,7 @@ fetching may be degraded.
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("256KiB")
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
},
@ -336,7 +336,7 @@ fetching may be degraded.
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("256KiB")
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
},
@ -350,7 +350,7 @@ fetching may be degraded.
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("1MiB") // 1MiB
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("1MiB") // 1MiB
return nil
},
},

View File

@ -15,12 +15,18 @@ const (
// DHT provider defaults
DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers
DefaultProvideDHTSweepEnabled = false
DefaultProvideDHTSweepEnabled = true
DefaultProvideDHTResumeEnabled = true
DefaultProvideDHTDedicatedPeriodicWorkers = 2
DefaultProvideDHTDedicatedBurstWorkers = 1
DefaultProvideDHTMaxProvideConnsPerWorker = 16
DefaultProvideDHTMaxProvideConnsPerWorker = 20
DefaultProvideDHTKeystoreBatchSize = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
DefaultProvideDHTOfflineDelay = 2 * time.Hour
// DefaultFastProvideTimeout is the maximum time allowed for fast-provide operations.
// Prevents hanging on network issues when providing root CID.
// 10 seconds is sufficient for DHT operations with sweep provider or accelerated client.
DefaultFastProvideTimeout = 10 * time.Second
)
type ProvideStrategy int
@ -63,7 +69,7 @@ type ProvideDHT struct {
MaxWorkers *OptionalInteger `json:",omitempty"`
// SweepEnabled activates the sweeping reprovider system which spreads
// reprovide operations over time. This will become the default in a future release.
// reprovide operations over time.
// Default: DefaultProvideDHTSweepEnabled
SweepEnabled Flag `json:",omitempty"`
@ -86,6 +92,12 @@ type ProvideDHT struct {
// OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
// Default: DefaultProvideDHTOfflineDelay
OfflineDelay *OptionalDuration `json:",omitempty"`
// ResumeEnabled controls whether the provider resumes from its previous state on restart.
// When enabled, the provider persists its reprovide cycle state and provide queue to the datastore,
// and restores them on restart. When disabled, the provider starts fresh on each restart.
// Default: true
ResumeEnabled Flag `json:",omitempty"`
}
func ParseProvideStrategy(s string) ProvideStrategy {
@ -168,3 +180,25 @@ func ValidateProvideConfig(cfg *Provide) error {
return nil
}
// ShouldProvideForStrategy determines if content should be provided based on the provide strategy
// and content characteristics (pinned status, root status, MFS status).
func ShouldProvideForStrategy(strategy ProvideStrategy, isPinned bool, isPinnedRoot bool, isMFS bool) bool {
if strategy == ProvideStrategyAll {
// 'all' strategy: always provide
return true
}
// For combined strategies, check each component
if strategy&ProvideStrategyPinned != 0 && isPinned {
return true
}
if strategy&ProvideStrategyRoots != 0 && isPinnedRoot {
return true
}
if strategy&ProvideStrategyMFS != 0 && isMFS {
return true
}
return false
}

View File

@ -105,3 +105,87 @@ func TestValidateProvideConfig_MaxWorkers(t *testing.T) {
})
}
}
func TestShouldProvideForStrategy(t *testing.T) {
t.Run("all strategy always provides", func(t *testing.T) {
// ProvideStrategyAll should return true regardless of flags
testCases := []struct{ pinned, pinnedRoot, mfs bool }{
{false, false, false},
{true, true, true},
{true, false, false},
}
for _, tc := range testCases {
assert.True(t, ShouldProvideForStrategy(
ProvideStrategyAll, tc.pinned, tc.pinnedRoot, tc.mfs))
}
})
t.Run("single strategies match only their flag", func(t *testing.T) {
tests := []struct {
name string
strategy ProvideStrategy
pinned, pinnedRoot, mfs bool
want bool
}{
{"pinned: matches when pinned=true", ProvideStrategyPinned, true, false, false, true},
{"pinned: ignores other flags", ProvideStrategyPinned, false, true, true, false},
{"roots: matches when pinnedRoot=true", ProvideStrategyRoots, false, true, false, true},
{"roots: ignores other flags", ProvideStrategyRoots, true, false, true, false},
{"mfs: matches when mfs=true", ProvideStrategyMFS, false, false, true, true},
{"mfs: ignores other flags", ProvideStrategyMFS, true, true, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs)
assert.Equal(t, tt.want, got)
})
}
})
t.Run("combined strategies use OR logic (else-if bug fix)", func(t *testing.T) {
// CRITICAL: Tests the fix where bitflag combinations (pinned+mfs) didn't work
// because of else-if instead of separate if statements
tests := []struct {
name string
strategy ProvideStrategy
pinned, pinnedRoot, mfs bool
want bool
}{
// pinned|mfs: provide if EITHER matches
{"pinned|mfs when pinned", ProvideStrategyPinned | ProvideStrategyMFS, true, false, false, true},
{"pinned|mfs when mfs", ProvideStrategyPinned | ProvideStrategyMFS, false, false, true, true},
{"pinned|mfs when both", ProvideStrategyPinned | ProvideStrategyMFS, true, false, true, true},
{"pinned|mfs when neither", ProvideStrategyPinned | ProvideStrategyMFS, false, false, false, false},
// roots|mfs
{"roots|mfs when root", ProvideStrategyRoots | ProvideStrategyMFS, false, true, false, true},
{"roots|mfs when mfs", ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true},
{"roots|mfs when neither", ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false},
// pinned|roots
{"pinned|roots when pinned", ProvideStrategyPinned | ProvideStrategyRoots, true, false, false, true},
{"pinned|roots when root", ProvideStrategyPinned | ProvideStrategyRoots, false, true, false, true},
{"pinned|roots when neither", ProvideStrategyPinned | ProvideStrategyRoots, false, false, false, false},
// triple combination
{"all-three when any matches", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true},
{"all-three when none match", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs)
assert.Equal(t, tt.want, got)
})
}
})
t.Run("zero strategy never provides", func(t *testing.T) {
assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), false, false, false))
assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), true, true, true))
})
}

View File

@ -118,7 +118,7 @@ type ResourceMgr struct {
Enabled Flag `json:",omitempty"`
Limits swarmLimits `json:",omitempty"`
MaxMemory *OptionalString `json:",omitempty"`
MaxMemory *OptionalBytes `json:",omitempty"`
MaxFileDescriptors *OptionalInteger `json:",omitempty"`
// A list of multiaddrs that can bypass normal system limits (but are still

View File

@ -7,6 +7,8 @@ import (
"io"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
)
// Strings is a helper type that (un)marshals a single string to/from a single
@ -115,6 +117,16 @@ func (f Flag) String() string {
}
}
// ResolveBoolFromConfig returns the resolved boolean value based on:
// - If userSet is true, returns userValue (user explicitly set the flag)
// - Otherwise, uses configFlag.WithDefault(defaultValue) (respects config or falls back to default)
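//
// Illustrative precedence: an explicitly passed --flag=false wins over everything;
// without the flag, the config value applies, and defaultValue is used only when
// the config leaves the option unset.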
func ResolveBoolFromConfig(userValue bool, userSet bool, configFlag Flag, defaultValue bool) bool {
if userSet {
return userValue
}
return configFlag.WithDefault(defaultValue)
}
var (
_ json.Unmarshaler = (*Flag)(nil)
_ json.Marshaler = (*Flag)(nil)
@ -425,8 +437,79 @@ func (p OptionalString) String() string {
}
var (
_ json.Unmarshaler = (*OptionalInteger)(nil)
_ json.Marshaler = (*OptionalInteger)(nil)
_ json.Unmarshaler = (*OptionalString)(nil)
_ json.Marshaler = (*OptionalString)(nil)
)
// OptionalBytes represents a byte size that has a default value
//
// When encoded in json, Default is encoded as "null".
// Stores the original string representation and parses on access.
// Embeds OptionalString to share common functionality.
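//
// Illustrative usage: a config field declared as
//   MaxMemory *OptionalBytes `json:",omitempty"`
// accepts JSON values such as "4GiB", "256KiB", or a plain number of bytes;
// WithDefault(d) returns the parsed size, or d when the field is unset.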
type OptionalBytes struct {
OptionalString
}
// NewOptionalBytes returns an OptionalBytes from a string.
func NewOptionalBytes(s string) *OptionalBytes {
return &OptionalBytes{OptionalString{value: &s}}
}
// IsDefault returns if this is a default optional byte value.
func (p *OptionalBytes) IsDefault() bool {
if p == nil {
return true
}
return p.OptionalString.IsDefault()
}
// WithDefault resolves the byte size with the given default.
// Parses the stored string value using humanize.ParseBytes.
func (p *OptionalBytes) WithDefault(defaultValue uint64) (value uint64) {
if p.IsDefault() {
return defaultValue
}
strValue := p.OptionalString.WithDefault("")
bytes, err := humanize.ParseBytes(strValue)
if err != nil {
// This should never happen as values are validated during UnmarshalJSON.
// If it does, it indicates either config corruption or a programming error.
panic(fmt.Sprintf("invalid byte size in OptionalBytes: %q - %v", strValue, err))
}
return bytes
}
// UnmarshalJSON validates the input is a parseable byte size.
func (p *OptionalBytes) UnmarshalJSON(input []byte) error {
switch string(input) {
case "null", "undefined":
*p = OptionalBytes{}
default:
var value interface{}
err := json.Unmarshal(input, &value)
if err != nil {
return err
}
switch v := value.(type) {
case float64:
str := fmt.Sprintf("%.0f", v)
p.value = &str
case string:
_, err := humanize.ParseBytes(v)
if err != nil {
return err
}
p.value = &v
default:
return fmt.Errorf("unable to parse byte size, expected a size string (e.g., \"5GiB\") or a number, but got %T", v)
}
}
return nil
}
var (
_ json.Unmarshaler = (*OptionalBytes)(nil)
_ json.Marshaler = (*OptionalBytes)(nil)
)
type swarmLimits doNotUse

View File

@ -5,6 +5,9 @@ import (
"encoding/json"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestOptionalDuration(t *testing.T) {
@ -509,3 +512,125 @@ func TestOptionalString(t *testing.T) {
}
}
}
func TestOptionalBytes(t *testing.T) {
makeStringPointer := func(v string) *string { return &v }
t.Run("default value", func(t *testing.T) {
var b OptionalBytes
assert.True(t, b.IsDefault())
assert.Equal(t, uint64(0), b.WithDefault(0))
assert.Equal(t, uint64(1024), b.WithDefault(1024))
assert.Equal(t, "default", b.String())
})
t.Run("non-default value", func(t *testing.T) {
b := OptionalBytes{OptionalString{value: makeStringPointer("1MiB")}}
assert.False(t, b.IsDefault())
assert.Equal(t, uint64(1048576), b.WithDefault(512))
assert.Equal(t, "1MiB", b.String())
})
t.Run("JSON roundtrip", func(t *testing.T) {
testCases := []struct {
jsonInput string
jsonOutput string
expectedValue string
}{
{"null", "null", ""},
{"\"256KiB\"", "\"256KiB\"", "256KiB"},
{"\"1MiB\"", "\"1MiB\"", "1MiB"},
{"\"5GiB\"", "\"5GiB\"", "5GiB"},
{"\"256KB\"", "\"256KB\"", "256KB"},
{"1048576", "\"1048576\"", "1048576"},
}
for _, tc := range testCases {
t.Run(tc.jsonInput, func(t *testing.T) {
var b OptionalBytes
err := json.Unmarshal([]byte(tc.jsonInput), &b)
require.NoError(t, err)
if tc.expectedValue == "" {
assert.Nil(t, b.value)
} else {
require.NotNil(t, b.value)
assert.Equal(t, tc.expectedValue, *b.value)
}
out, err := json.Marshal(b)
require.NoError(t, err)
assert.Equal(t, tc.jsonOutput, string(out))
})
}
})
t.Run("parsing byte sizes", func(t *testing.T) {
testCases := []struct {
input string
expected uint64
}{
{"256KiB", 262144},
{"1MiB", 1048576},
{"5GiB", 5368709120},
{"256KB", 256000},
{"1048576", 1048576},
}
for _, tc := range testCases {
t.Run(tc.input, func(t *testing.T) {
var b OptionalBytes
err := json.Unmarshal([]byte("\""+tc.input+"\""), &b)
require.NoError(t, err)
assert.Equal(t, tc.expected, b.WithDefault(0))
})
}
})
t.Run("omitempty", func(t *testing.T) {
type Foo struct {
B *OptionalBytes `json:",omitempty"`
}
out, err := json.Marshal(new(Foo))
require.NoError(t, err)
assert.Equal(t, "{}", string(out))
var foo2 Foo
err = json.Unmarshal(out, &foo2)
require.NoError(t, err)
if foo2.B != nil {
assert.Equal(t, uint64(1024), foo2.B.WithDefault(1024))
assert.True(t, foo2.B.IsDefault())
} else {
// When field is omitted, pointer is nil which is also considered default
t.Log("B is nil, which is acceptable for omitempty")
}
})
t.Run("invalid values", func(t *testing.T) {
invalidInputs := []string{
"\"5XiB\"", "\"invalid\"", "\"\"", "[]", "{}",
}
for _, invalid := range invalidInputs {
t.Run(invalid, func(t *testing.T) {
var b OptionalBytes
err := json.Unmarshal([]byte(invalid), &b)
assert.Error(t, err)
})
}
})
t.Run("panic on invalid stored value", func(t *testing.T) {
// This tests that if somehow an invalid value gets stored
// (bypassing UnmarshalJSON validation), WithDefault will panic
invalidValue := "invalid-size"
b := OptionalBytes{OptionalString{value: &invalidValue}}
assert.Panics(t, func() {
b.WithDefault(1024)
}, "should panic on invalid stored value")
})
}

View File

@ -61,20 +61,45 @@ const (
inlineLimitOptionName = "inline-limit"
toFilesOptionName = "to-files"
preserveModeOptionName = "preserve-mode"
preserveMtimeOptionName = "preserve-mtime"
modeOptionName = "mode"
mtimeOptionName = "mtime"
mtimeNsecsOptionName = "mtime-nsecs"
preserveModeOptionName = "preserve-mode"
preserveMtimeOptionName = "preserve-mtime"
modeOptionName = "mode"
mtimeOptionName = "mtime"
mtimeNsecsOptionName = "mtime-nsecs"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
)
const adderOutChanSize = 8
const (
adderOutChanSize = 8
)
var AddCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Add a file or directory to IPFS.",
ShortDescription: `
Adds the content of <path> to IPFS. Use -r to add directories (recursively).
FAST PROVIDE OPTIMIZATION:
When you add content to IPFS, the sweep provider queues it for efficient
DHT provides over time. While this is resource-efficient, other peers won't
find your content immediately after 'ipfs add' completes.
To make sharing faster, 'ipfs add' does an immediate provide of the root CID
to the DHT in addition to the regular queue. This complements the sweep provider:
fast-provide handles the urgent case (root CIDs that users share and reference),
while the sweep provider efficiently provides all blocks according to
Provide.Strategy over time.
By default, this immediate provide runs in the background without blocking
the command. If you need certainty that the root CID is discoverable before
the command returns (e.g., sharing a link immediately), use --fast-provide-wait
to wait for the provide to complete. Use --fast-provide-root=false to skip
this optimization.
This works best with the sweep provider and accelerated DHT client.
Automatically skipped when DHT is not available.
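For example:
  ipfs add --fast-provide-wait ./file.txt
  ipfs add --fast-provide-root=false ./dir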
`,
LongDescription: `
Adds the content of <path> to IPFS. Use -r to add directories.
@ -213,6 +238,8 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"),
cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CID to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"),
cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"),
},
PreRun: func(req *cmds.Request, env cmds.Environment) error {
quiet, _ := req.Options[quietOptionName].(bool)
@ -283,6 +310,8 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
mode, _ := req.Options[modeOptionName].(uint)
mtime, _ := req.Options[mtimeOptionName].(int64)
mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint)
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
if chunker == "" {
chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)
@ -319,6 +348,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
}
fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)
// Storing optional mode or mtime (UnixFS 1.5) requires root block
// to always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible.
if preserveMode || preserveMtime || mode != 0 || mtime != 0 {
@ -421,11 +453,12 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
}
var added int
var fileAddedToMFS bool
var lastRootCid path.ImmutablePath // Track the root CID for fast-provide
addit := toadd.Entries()
for addit.Next() {
_, dir := addit.Node().(files.Directory)
errCh := make(chan error, 1)
events := make(chan interface{}, adderOutChanSize)
events := make(chan any, adderOutChanSize)
opts[len(opts)-1] = options.Unixfs.Events(events)
go func() {
@ -437,6 +470,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
return
}
// Store the root CID for potential fast-provide operation
lastRootCid = pathAdded
// creating MFS pointers when optional --to-files is set
if toFilesSet {
if addit.Name() == "" {
@ -560,12 +596,29 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
return fmt.Errorf("expected a file argument")
}
// Apply fast-provide-root if the flag is enabled
if fastProvideRoot && (lastRootCid != path.ImmutablePath{}) {
cfg, err := ipfsNode.Repo.Config()
if err != nil {
return err
}
if err := cmdenv.ExecuteFastProvide(req.Context, ipfsNode, cfg, lastRootCid.RootCid(), fastProvideWait, dopin, dopin, toFilesSet); err != nil {
return err
}
} else if !fastProvideRoot {
if fastProvideWait {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true)
} else {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config")
}
}
return nil
},
PostRun: cmds.PostRunMap{
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
sizeChan := make(chan int64, 1)
outChan := make(chan interface{})
outChan := make(chan any)
req := res.Request()
// Could be slow.

View File

@ -1,15 +1,19 @@
package cmdenv
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/core"
"github.com/ipfs/go-cid"
cmds "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log/v2"
routing "github.com/libp2p/go-libp2p/core/routing"
"github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
coreiface "github.com/ipfs/kubo/core/coreiface"
options "github.com/ipfs/kubo/core/coreiface/options"
)
@ -86,3 +90,103 @@ func needEscape(s string) bool {
}
return false
}
// provideCIDSync performs a synchronous/blocking provide operation to announce
// the given CID to the DHT.
//
// - If the accelerated DHT client is used, a DHT lookup isn't needed, we
// directly allocate provider records to closest peers.
// - If Provide.DHT.SweepEnabled=true or OptimisticProvide=true, we make an
// optimistic provide call.
// - Else we make a standard provide call (much slower).
//
// IMPORTANT: The caller MUST verify DHT availability using HasActiveDHTClient()
// before calling this function. Calling with a nil or invalid router will cause
// a panic - this is the caller's responsibility to prevent.
func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) error {
return router.Provide(ctx, c, true)
}
// ExecuteFastProvide immediately provides a root CID to the DHT, bypassing the regular
// provide queue for faster content discovery. This function is reusable across commands
// that add or import content, such as ipfs add and ipfs dag import.
//
// Parameters:
// - ctx: context for synchronous provides
// - ipfsNode: the IPFS node instance
// - cfg: node configuration
// - rootCid: the CID to provide
// - wait: whether to block until provide completes (sync mode)
// - isPinned: whether content is pinned
// - isPinnedRoot: whether this is a pinned root CID
// - isMFS: whether content is in MFS
//
// Return value:
// - Returns nil if operation succeeded or was skipped (preconditions not met)
// - Returns error only in sync mode (wait=true) when provide operation fails
// - In async mode (wait=false), always returns nil (errors logged in goroutine)
//
// The function handles all precondition checks (Provide.Enabled, DHT availability,
// strategy matching) and logs appropriately. In async mode, it launches a goroutine
// with a detached context and timeout.
func ExecuteFastProvide(
ctx context.Context,
ipfsNode *core.IpfsNode,
cfg *config.Config,
rootCid cid.Cid,
wait bool,
isPinned bool,
isPinnedRoot bool,
isMFS bool,
) error {
log.Debugw("fast-provide-root: enabled", "wait", wait)
// Check preconditions for providing
switch {
case !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled):
log.Debugw("fast-provide-root: skipped", "reason", "Provide.Enabled is false")
return nil
case cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0:
log.Debugw("fast-provide-root: skipped", "reason", "Provide.DHT.Interval is 0")
return nil
case !ipfsNode.HasActiveDHTClient():
log.Debugw("fast-provide-root: skipped", "reason", "DHT not available")
return nil
}
// Check if strategy allows providing this content
strategyStr := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
strategy := config.ParseProvideStrategy(strategyStr)
shouldProvide := config.ShouldProvideForStrategy(strategy, isPinned, isPinnedRoot, isMFS)
if !shouldProvide {
log.Debugw("fast-provide-root: skipped", "reason", "strategy does not match content", "strategy", strategyStr, "pinned", isPinned, "pinnedRoot", isPinnedRoot, "mfs", isMFS)
return nil
}
// Execute provide operation
if wait {
// Synchronous mode: block until provide completes, return error on failure
log.Debugw("fast-provide-root: providing synchronously", "cid", rootCid)
if err := provideCIDSync(ctx, ipfsNode.DHTClient, rootCid); err != nil {
log.Warnw("fast-provide-root: sync provide failed", "cid", rootCid, "error", err)
return fmt.Errorf("fast-provide: %w", err)
}
log.Debugw("fast-provide-root: sync provide completed", "cid", rootCid)
return nil
}
// Asynchronous mode (default): fire-and-forget, don't block, always return nil
log.Debugw("fast-provide-root: providing asynchronously", "cid", rootCid)
go func() {
// Use detached context with timeout to prevent hanging on network issues
ctx, cancel := context.WithTimeout(context.Background(), config.DefaultFastProvideTimeout)
defer cancel()
if err := provideCIDSync(ctx, ipfsNode.DHTClient, rootCid); err != nil {
log.Warnw("fast-provide-root: async provide failed", "cid", rootCid, "error", err)
} else {
log.Debugw("fast-provide-root: async provide completed", "cid", rootCid)
}
}()
return nil
}

View File

@ -74,10 +74,13 @@ func PathOrCidPath(str string) (path.Path, error) {
return p, nil
}
// Save the original error before attempting fallback
originalErr := err
if p, err := path.NewPath("/ipfs/" + str); err == nil {
return p, nil
}
// Send back original err.
return nil, err
return nil, originalErr
}

View File

@ -0,0 +1,106 @@
package cmdutils
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestPathOrCidPath(t *testing.T) {
t.Run("valid path is returned as-is", func(t *testing.T) {
validPath := "/ipfs/QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"
p, err := PathOrCidPath(validPath)
require.NoError(t, err)
assert.Equal(t, validPath, p.String())
})
t.Run("valid CID is converted to /ipfs/ path", func(t *testing.T) {
cid := "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"
p, err := PathOrCidPath(cid)
require.NoError(t, err)
assert.Equal(t, "/ipfs/"+cid, p.String())
})
t.Run("valid ipns path is returned as-is", func(t *testing.T) {
validPath := "/ipns/example.com"
p, err := PathOrCidPath(validPath)
require.NoError(t, err)
assert.Equal(t, validPath, p.String())
})
t.Run("returns original error when both attempts fail", func(t *testing.T) {
invalidInput := "invalid!@#path"
_, err := PathOrCidPath(invalidInput)
require.Error(t, err)
// The error should reference the original input attempt.
// This ensures users get meaningful error messages about their actual input.
assert.Contains(t, err.Error(), invalidInput,
"error should mention the original input")
assert.Contains(t, err.Error(), "path does not have enough components",
"error should describe the problem with the original input")
})
t.Run("empty string returns error about original input", func(t *testing.T) {
_, err := PathOrCidPath("")
require.Error(t, err)
// Verify we're not getting an error about "/ipfs/" (the fallback)
errMsg := err.Error()
assert.NotContains(t, errMsg, "/ipfs/",
"error should be about empty input, not the fallback path")
})
t.Run("invalid characters return error about original input", func(t *testing.T) {
invalidInput := "not a valid path or CID with spaces and /@#$%"
_, err := PathOrCidPath(invalidInput)
require.Error(t, err)
// The error message should help debug the original input
assert.True(t, strings.Contains(err.Error(), invalidInput) ||
strings.Contains(err.Error(), "invalid"),
"error should reference original problematic input")
})
t.Run("CID with path is converted correctly", func(t *testing.T) {
cidWithPath := "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG/file.txt"
p, err := PathOrCidPath(cidWithPath)
require.NoError(t, err)
assert.Equal(t, "/ipfs/"+cidWithPath, p.String())
})
}
func TestValidatePinName(t *testing.T) {
t.Run("valid pin name is accepted", func(t *testing.T) {
err := ValidatePinName("my-pin-name")
assert.NoError(t, err)
})
t.Run("empty pin name is accepted", func(t *testing.T) {
err := ValidatePinName("")
assert.NoError(t, err)
})
t.Run("pin name at max length is accepted", func(t *testing.T) {
maxName := strings.Repeat("a", MaxPinNameBytes)
err := ValidatePinName(maxName)
assert.NoError(t, err)
})
t.Run("pin name exceeding max length is rejected", func(t *testing.T) {
tooLong := strings.Repeat("a", MaxPinNameBytes+1)
err := ValidatePinName(tooLong)
require.Error(t, err)
assert.Contains(t, err.Error(), "max")
})
t.Run("pin name with unicode is counted by bytes", func(t *testing.T) {
// A Unicode character can be multiple bytes
unicodeName := strings.Repeat("🔒", MaxPinNameBytes/4+1) // emoji is 4 bytes
err := ValidatePinName(unicodeName)
require.Error(t, err)
assert.Contains(t, err.Error(), "bytes")
})
}

View File

@ -16,10 +16,12 @@ import (
)
const (
pinRootsOptionName = "pin-roots"
progressOptionName = "progress"
silentOptionName = "silent"
statsOptionName = "stats"
pinRootsOptionName = "pin-roots"
progressOptionName = "progress"
silentOptionName = "silent"
statsOptionName = "stats"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
)
// DagCmd provides a subset of commands for interacting with ipld dag objects
@ -189,6 +191,18 @@ Note:
currently present in the blockstore does not represent a complete DAG,
pinning of that individual root will fail.
FAST PROVIDE OPTIMIZATION:
Root CIDs from CAR headers are immediately provided to the DHT in addition
to the regular provide queue, allowing other peers to discover your content
right away. This complements the sweep provider, which efficiently provides
all blocks according to Provide.Strategy over time.
By default, the provide happens in the background without blocking the
command. Use --fast-provide-wait to wait for the provide to complete, or
--fast-provide-root=false to skip it. Works even with --pin-roots=false.
Automatically skipped when DHT is not available.
Maximum supported CAR version: 2
Specification of CAR formats: https://ipld.io/specs/transport/car/
`,
@ -200,6 +214,8 @@ Specification of CAR formats: https://ipld.io/specs/transport/car/
cmds.BoolOption(pinRootsOptionName, "Pin optional roots listed in the .car headers after importing.").WithDefault(true),
cmds.BoolOption(silentOptionName, "No output."),
cmds.BoolOption(statsOptionName, "Output stats."),
cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CIDs to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"),
cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"),
cmdutils.AllowBigBlockOption,
},
Type: CarImportOutput{},

View File

@ -11,6 +11,7 @@ import (
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
ipldlegacy "github.com/ipfs/go-ipld-legacy"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/coreiface/options"
gocarv2 "github.com/ipld/go-car/v2"
@ -19,6 +20,8 @@ import (
"github.com/ipfs/kubo/core/commands/cmdutils"
)
var log = logging.Logger("core/commands")
func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
node, err := cmdenv.GetNode(env)
if err != nil {
@ -47,6 +50,12 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
doPinRoots, _ := req.Options[pinRootsOptionName].(bool)
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)
// grab a pinlock ( which doubles as a GC lock ) so that regardless of the
// size of the streamed-in cars nothing will disappear on us before we had
// a chance to pin roots that may show up at the very end
@ -191,5 +200,21 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
}
}
// Fast-provide roots for faster discovery
if fastProvideRoot {
err = roots.ForEach(func(c cid.Cid) error {
return cmdenv.ExecuteFastProvide(req.Context, node, cfg, c, fastProvideWait, doPinRoots, doPinRoots, false)
})
if err != nil {
return err
}
} else {
if fastProvideWait {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true)
} else {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config")
}
}
return nil
}

View File

@ -56,7 +56,7 @@ var queryDhtCmd = &cmds.Command{
return err
}
if nd.DHTClient == nil {
if !nd.HasActiveDHTClient() {
return ErrNotDHT
}
@ -70,7 +70,7 @@ var queryDhtCmd = &cmds.Command{
ctx, events := routing.RegisterForQueryEvents(ctx)
client := nd.DHTClient
if client == nd.DHT {
if nd.DHT != nil && client == nd.DHT {
client = nd.DHT.WAN
if !nd.DHT.WANActive() {
client = nd.DHT.LAN

View File

@ -1,36 +1,69 @@
package commands
import (
"context"
"errors"
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
"unicode/utf8"
humanize "github.com/dustin/go-humanize"
"github.com/ipfs/boxo/provider"
boxoprovider "github.com/ipfs/boxo/provider"
cid "github.com/ipfs/go-cid"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
"github.com/libp2p/go-libp2p-kad-dht/provider"
"github.com/libp2p/go-libp2p-kad-dht/provider/buffered"
"github.com/libp2p/go-libp2p-kad-dht/provider/dual"
"github.com/libp2p/go-libp2p-kad-dht/provider/stats"
routing "github.com/libp2p/go-libp2p/core/routing"
"github.com/probe-lab/go-libdht/kad/key"
"golang.org/x/exp/constraints"
)
const (
provideQuietOptionName = "quiet"
provideLanOptionName = "lan"
provideStatAllOptionName = "all"
provideStatCompactOptionName = "compact"
provideStatNetworkOptionName = "network"
provideStatConnectivityOptionName = "connectivity"
provideStatOperationsOptionName = "operations"
provideStatTimingsOptionName = "timings"
provideStatScheduleOptionName = "schedule"
provideStatQueuesOptionName = "queues"
provideStatWorkersOptionName = "workers"
// lowWorkerThreshold is the threshold below which worker availability warnings are shown
lowWorkerThreshold = 2
)
var ProvideCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Control providing operations",
Tagline: "Control and monitor content providing",
ShortDescription: `
Control providing operations.
NOTE: This command is experimental and not all provide-related commands have
been migrated to this namespace yet. For example, 'ipfs routing
provide|reprovide' are still under the routing namespace, 'ipfs stats
reprovide' provides statistics. Additionally, 'ipfs bitswap reprovide' and
'ipfs stats provide' are deprecated.
OVERVIEW:
The provider system advertises content by publishing provider records,
allowing other nodes to discover which peers have specific content.
Content is reprovided periodically (every Provide.DHT.Interval)
according to Provide.Strategy.
CONFIGURATION:
Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
SEE ALSO:
For ad-hoc one-time provide, see 'ipfs routing provide'
`,
},
@ -47,10 +80,18 @@ var provideClearCmd = &cmds.Command{
ShortDescription: `
Clear all CIDs pending to be provided for the first time.
Note: Kubo will automatically clear the queue when it detects a change of
Provide.Strategy upon a restart. For more information about provide
strategies, see:
https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
BEHAVIOR:
This command removes CIDs from the provide queue that are waiting to be
advertised to the DHT for the first time. It does not affect content that
is already being reprovided on schedule.
AUTOMATIC CLEARING:
Kubo will automatically clear the queue when it detects a change of
Provide.Strategy upon a restart.
Learn: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
`,
},
Options: []cmds.Option{
@ -90,25 +131,108 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
}
type provideStats struct {
provider.ReproviderStats
fullRT bool
Sweep *stats.Stats
Legacy *boxoprovider.ReproviderStats
FullRT bool // only used for legacy stats
}
// extractSweepingProvider extracts a SweepingProvider from the given provider interface.
// It handles unwrapping buffered and dual providers, selecting LAN or WAN as specified.
// Returns nil if the provider is not a sweeping provider type.
func extractSweepingProvider(prov any, useLAN bool) *provider.SweepingProvider {
switch p := prov.(type) {
case *provider.SweepingProvider:
return p
case *dual.SweepingProvider:
if useLAN {
return p.LAN
}
return p.WAN
case *buffered.SweepingProvider:
// Recursively extract from the inner provider
return extractSweepingProvider(p.Provider, useLAN)
default:
return nil
}
}
var provideStatCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Returns statistics about the node's provider system.",
Tagline: "Show statistics about the provider system",
ShortDescription: `
Returns statistics about the content the node is reproviding every
Provide.DHT.Interval according to Provide.Strategy:
https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
Returns statistics about the node's provider system.
This interface is not stable and may change from release to release.
OVERVIEW:
The provide system advertises content to the DHT by publishing provider
records that map CIDs to your peer ID. These records expire after a fixed
TTL to account for node churn, so content must be reprovided periodically
to stay discoverable.
Two provider types exist:
- Sweep provider: Divides the DHT keyspace into regions and systematically
sweeps through them over the reprovide interval. Batches CIDs allocated
to the same DHT servers, reducing lookups from N (one per CID) to a
small static number based on DHT size (~3k for 10k DHT servers). Spreads
work evenly over time to prevent resource spikes and ensure announcements
happen just before records expire.
- Legacy provider: Processes each CID individually with separate DHT
lookups. Attempts to reprovide all content as quickly as possible at the
start of each cycle. Works well for small datasets but struggles with
large collections.
Learn more:
- Config: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
- Metrics: https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md
DEFAULT OUTPUT:
Shows a brief summary including queue sizes, scheduled items, average record
holders, ongoing/total provides, and worker warnings.
DETAILED OUTPUT:
Use --all for detailed statistics with these sections: connectivity, queues,
schedule, timings, network, operations, and workers. Individual sections can
be displayed with their flags (e.g., --network, --operations). Multiple flags
can be combined.
Use --compact for monitoring-friendly 2-column output (requires --all).
EXAMPLES:
Monitor provider statistics in real-time with 2-column layout:
watch ipfs provide stat --all --compact
Get statistics in JSON format for programmatic processing:
ipfs provide stat --enc=json | jq
NOTES:
- This interface is experimental and may change between releases
- Legacy provider shows basic stats only (no flags supported)
- "Regions" are keyspace divisions for spreading reprovide work
- For Dual DHT: use --lan for LAN provider stats (default is WAN)
`,
},
Arguments: []cmds.Argument{},
Options: []cmds.Option{},
Options: []cmds.Option{
cmds.BoolOption(provideLanOptionName, "Show stats for LAN DHT only (for Sweep+Dual DHT only)"),
cmds.BoolOption(provideStatAllOptionName, "a", "Display all provide sweep stats"),
cmds.BoolOption(provideStatCompactOptionName, "Display stats in 2-column layout (requires --all)"),
cmds.BoolOption(provideStatConnectivityOptionName, "Display DHT connectivity status"),
cmds.BoolOption(provideStatNetworkOptionName, "Display network stats (peers, reachability, region size)"),
cmds.BoolOption(provideStatScheduleOptionName, "Display reprovide schedule (CIDs/regions scheduled, next reprovide time)"),
cmds.BoolOption(provideStatTimingsOptionName, "Display timing information (uptime, cycle start, reprovide interval)"),
cmds.BoolOption(provideStatWorkersOptionName, "Display worker pool stats (active/available/queued workers)"),
cmds.BoolOption(provideStatOperationsOptionName, "Display operation stats (ongoing/past provides, rates, errors)"),
cmds.BoolOption(provideStatQueuesOptionName, "Display provide and reprovide queue sizes"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
@ -119,35 +243,272 @@ This interface is not stable and may change from release to release.
return ErrNotOnline
}
provideSys, ok := nd.Provider.(provider.System)
if !ok {
return errors.New("stats not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
lanStats, _ := req.Options[provideLanOptionName].(bool)
// Handle legacy provider
if legacySys, ok := nd.Provider.(boxoprovider.System); ok {
if lanStats {
return errors.New("LAN stats only available for Sweep provider with Dual DHT")
}
stats, err := legacySys.Stat()
if err != nil {
return err
}
_, fullRT := nd.DHTClient.(*fullrt.FullRT)
return res.Emit(provideStats{Legacy: &stats, FullRT: fullRT})
}
stats, err := provideSys.Stat()
if err != nil {
return err
}
_, fullRT := nd.DHTClient.(*fullrt.FullRT)
if err := res.Emit(provideStats{stats, fullRT}); err != nil {
return err
// Extract sweeping provider (handles buffered and dual unwrapping)
sweepingProvider := extractSweepingProvider(nd.Provider, lanStats)
if sweepingProvider == nil {
if lanStats {
return errors.New("LAN stats only available for Sweep provider with Dual DHT")
}
return fmt.Errorf("stats not available with current routing system %T", nd.Provider)
}
return nil
s := sweepingProvider.Stats()
return res.Emit(provideStats{Sweep: &s})
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s provideStats) error {
wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)
defer wtr.Flush()
fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.TotalReprovides))
fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration))
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration))
if !s.LastRun.IsZero() {
fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.LastRun))
if s.fullRT {
fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval)))
all, _ := req.Options[provideStatAllOptionName].(bool)
compact, _ := req.Options[provideStatCompactOptionName].(bool)
connectivity, _ := req.Options[provideStatConnectivityOptionName].(bool)
queues, _ := req.Options[provideStatQueuesOptionName].(bool)
schedule, _ := req.Options[provideStatScheduleOptionName].(bool)
network, _ := req.Options[provideStatNetworkOptionName].(bool)
timings, _ := req.Options[provideStatTimingsOptionName].(bool)
operations, _ := req.Options[provideStatOperationsOptionName].(bool)
workers, _ := req.Options[provideStatWorkersOptionName].(bool)
flagCount := 0
for _, enabled := range []bool{all, connectivity, queues, schedule, network, timings, operations, workers} {
if enabled {
flagCount++
}
}
if s.Legacy != nil {
if flagCount > 0 {
return errors.New("cannot use flags with legacy provide stats")
}
fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.Legacy.TotalReprovides))
fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.Legacy.AvgReprovideDuration))
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.Legacy.LastReprovideDuration))
if !s.Legacy.LastRun.IsZero() {
fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.Legacy.LastRun))
if s.FullRT {
fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.Legacy.LastRun.Add(s.Legacy.ReprovideInterval)))
}
}
return nil
}
if s.Sweep == nil {
return errors.New("no provide stats available")
}
// Sweep provider stats
if s.Sweep.Closed {
fmt.Fprintf(wtr, "Provider is closed\n")
return nil
}
if compact && !all {
return errors.New("--compact requires --all flag")
}
brief := flagCount == 0
showHeadings := flagCount > 1 || all
compactMode := all && compact
var cols [2][]string
col0MaxWidth := 0
// formatLine handles both normal and compact output modes:
// - Normal mode: all lines go to cols[0], col parameter is ignored
// - Compact mode: col 0 for left column, col 1 for right column
formatLine := func(col int, format string, a ...any) {
if compactMode {
s := fmt.Sprintf(format, a...)
cols[col] = append(cols[col], s)
if col == 0 {
col0MaxWidth = max(col0MaxWidth, utf8.RuneCountInString(s))
}
return
}
format = strings.Replace(format, ": ", ":\t", 1)
format = strings.Replace(format, ", ", ",\t", 1)
cols[0] = append(cols[0], fmt.Sprintf(format, a...))
}
addBlankLine := func(col int) {
if !brief {
formatLine(col, "")
}
}
sectionTitle := func(col int, title string) {
if !brief && showHeadings {
//nolint:govet // dynamic format string is intentional
formatLine(col, title+":")
}
}
indent := " "
if brief || !showHeadings {
indent = ""
}
// Connectivity
if all || connectivity || (brief && s.Sweep.Connectivity.Status != "online") {
sectionTitle(1, "Connectivity")
since := s.Sweep.Connectivity.Since
if since.IsZero() {
formatLine(1, "%sStatus: %s", indent, s.Sweep.Connectivity.Status)
} else {
formatLine(1, "%sStatus: %s (%s)", indent, s.Sweep.Connectivity.Status, humanTime(since))
}
addBlankLine(1)
}
// Queues
if all || queues || brief {
sectionTitle(1, "Queues")
formatLine(1, "%sProvide queue: %s CIDs, %s regions", indent, humanSI(s.Sweep.Queues.PendingKeyProvides, 1), humanSI(s.Sweep.Queues.PendingRegionProvides, 1))
formatLine(1, "%sReprovide queue: %s regions", indent, humanSI(s.Sweep.Queues.PendingRegionReprovides, 1))
addBlankLine(1)
}
// Schedule
if all || schedule || brief {
sectionTitle(0, "Schedule")
formatLine(0, "%sCIDs scheduled: %s", indent, humanNumber(s.Sweep.Schedule.Keys))
formatLine(0, "%sRegions scheduled: %s", indent, humanNumberOrNA(s.Sweep.Schedule.Regions))
if !brief {
formatLine(0, "%sAvg prefix length: %s", indent, humanFloatOrNA(s.Sweep.Schedule.AvgPrefixLength))
nextPrefix := key.BitString(s.Sweep.Schedule.NextReprovidePrefix)
if nextPrefix == "" {
nextPrefix = "N/A"
}
formatLine(0, "%sNext region prefix: %s", indent, nextPrefix)
nextReprovideAt := s.Sweep.Schedule.NextReprovideAt.Format("15:04:05")
if s.Sweep.Schedule.NextReprovideAt.IsZero() {
nextReprovideAt = "N/A"
}
formatLine(0, "%sNext region reprovide: %s", indent, nextReprovideAt)
}
addBlankLine(0)
}
// Timings
if all || timings {
sectionTitle(1, "Timings")
formatLine(1, "%sUptime: %s (%s)", indent, humanDuration(s.Sweep.Timing.Uptime), humanTime(time.Now().Add(-s.Sweep.Timing.Uptime)))
formatLine(1, "%sCurrent time offset: %s", indent, humanDuration(s.Sweep.Timing.CurrentTimeOffset))
formatLine(1, "%sCycle started: %s", indent, humanTime(s.Sweep.Timing.CycleStart))
formatLine(1, "%sReprovide interval: %s", indent, humanDuration(s.Sweep.Timing.ReprovidesInterval))
addBlankLine(1)
}
// Network
if all || network || brief {
sectionTitle(0, "Network")
formatLine(0, "%sAvg record holders: %s", indent, humanFloatOrNA(s.Sweep.Network.AvgHolders))
if !brief {
formatLine(0, "%sPeers swept: %s", indent, humanInt(s.Sweep.Network.Peers))
formatLine(0, "%sFull keyspace coverage: %t", indent, s.Sweep.Network.CompleteKeyspaceCoverage)
if s.Sweep.Network.Peers > 0 {
formatLine(0, "%sReachable peers: %s (%s%%)", indent, humanInt(s.Sweep.Network.Reachable), humanNumber(100*s.Sweep.Network.Reachable/s.Sweep.Network.Peers))
} else {
formatLine(0, "%sReachable peers: %s", indent, humanInt(s.Sweep.Network.Reachable))
}
formatLine(0, "%sAvg region size: %s", indent, humanFloatOrNA(s.Sweep.Network.AvgRegionSize))
formatLine(0, "%sReplication factor: %s", indent, humanNumber(s.Sweep.Network.ReplicationFactor))
addBlankLine(0)
}
}
// Operations
if all || operations || brief {
sectionTitle(1, "Operations")
// Ongoing operations
formatLine(1, "%sOngoing provides: %s CIDs, %s regions", indent, humanSI(s.Sweep.Operations.Ongoing.KeyProvides, 1), humanSI(s.Sweep.Operations.Ongoing.RegionProvides, 1))
formatLine(1, "%sOngoing reprovides: %s CIDs, %s regions", indent, humanSI(s.Sweep.Operations.Ongoing.KeyReprovides, 1), humanSI(s.Sweep.Operations.Ongoing.RegionReprovides, 1))
// Past operations summary
formatLine(1, "%sTotal CIDs provided: %s", indent, humanNumber(s.Sweep.Operations.Past.KeysProvided))
if !brief {
formatLine(1, "%sTotal records provided: %s", indent, humanNumber(s.Sweep.Operations.Past.RecordsProvided))
formatLine(1, "%sTotal provide errors: %s", indent, humanNumber(s.Sweep.Operations.Past.KeysFailed))
formatLine(1, "%sCIDs provided/min/worker: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.KeysProvidedPerMinute))
formatLine(1, "%sCIDs reprovided/min/worker: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.KeysReprovidedPerMinute))
formatLine(1, "%sRegion reprovide duration: %s", indent, humanDurationOrNA(s.Sweep.Operations.Past.RegionReprovideDuration))
formatLine(1, "%sAvg CIDs/reprovide: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.AvgKeysPerReprovide))
formatLine(1, "%sRegions reprovided (last cycle): %s", indent, humanNumber(s.Sweep.Operations.Past.RegionReprovidedLastCycle))
addBlankLine(1)
}
}
// Workers
displayWorkers := all || workers
if displayWorkers || brief {
availableReservedBurst := max(0, s.Sweep.Workers.DedicatedBurst-s.Sweep.Workers.ActiveBurst)
availableReservedPeriodic := max(0, s.Sweep.Workers.DedicatedPeriodic-s.Sweep.Workers.ActivePeriodic)
availableFreeWorkers := max(0, s.Sweep.Workers.Max-max(s.Sweep.Workers.DedicatedBurst, s.Sweep.Workers.ActiveBurst)-max(s.Sweep.Workers.DedicatedPeriodic, s.Sweep.Workers.ActivePeriodic))
availableBurst := availableFreeWorkers + availableReservedBurst
availablePeriodic := availableFreeWorkers + availableReservedPeriodic
if displayWorkers || availableBurst <= lowWorkerThreshold || availablePeriodic <= lowWorkerThreshold {
// Either we want to display workers information, or we are low on
// available workers and want to warn the user.
sectionTitle(0, "Workers")
specifyWorkers := " workers"
if compactMode {
specifyWorkers = ""
}
formatLine(0, "%sActive%s: %s / %s (max)", indent, specifyWorkers, humanInt(s.Sweep.Workers.Active), humanInt(s.Sweep.Workers.Max))
if brief {
// Brief mode - show condensed worker info
formatLine(0, "%sPeriodic%s: %s active, %s available, %s queued", indent, specifyWorkers,
humanInt(s.Sweep.Workers.ActivePeriodic), humanInt(availablePeriodic), humanInt(s.Sweep.Workers.QueuedPeriodic))
formatLine(0, "%sBurst%s: %s active, %s available, %s queued\n", indent, specifyWorkers,
humanInt(s.Sweep.Workers.ActiveBurst), humanInt(availableBurst), humanInt(s.Sweep.Workers.QueuedBurst))
} else {
formatLine(0, "%sFree%s: %s", indent, specifyWorkers, humanInt(availableFreeWorkers))
formatLine(0, "%s %-14s %-9s %s", indent, "Workers stats:", "Periodic", "Burst")
formatLine(0, "%s %-14s %-9s %s", indent, "Active:", humanInt(s.Sweep.Workers.ActivePeriodic), humanInt(s.Sweep.Workers.ActiveBurst))
formatLine(0, "%s %-14s %-9s %s", indent, "Dedicated:", humanInt(s.Sweep.Workers.DedicatedPeriodic), humanInt(s.Sweep.Workers.DedicatedBurst))
formatLine(0, "%s %-14s %-9s %s", indent, "Available:", humanInt(availablePeriodic), humanInt(availableBurst))
formatLine(0, "%s %-14s %-9s %s", indent, "Queued:", humanInt(s.Sweep.Workers.QueuedPeriodic), humanInt(s.Sweep.Workers.QueuedBurst))
formatLine(0, "%sMax connections/worker: %s", indent, humanInt(s.Sweep.Workers.MaxProvideConnsPerWorker))
addBlankLine(0)
}
}
}
if compactMode {
col0Width := col0MaxWidth + 2
// Print both columns side by side
maxRows := max(len(cols[0]), len(cols[1]))
if maxRows == 0 {
return nil
}
for i := range maxRows - 1 { // last line is empty
var left, right string
if i < len(cols[0]) {
left = cols[0][i]
}
if i < len(cols[1]) {
right = cols[1][i]
}
fmt.Fprintf(wtr, "%-*s %s\n", col0Width, left, right)
}
} else {
if !brief {
cols[0] = cols[0][:len(cols[0])-1] // remove last blank line
}
for _, line := range cols[0] {
fmt.Fprintln(wtr, line)
}
}
return nil
@ -157,10 +518,23 @@ This interface is not stable and may change from release to release.
}
func humanDuration(val time.Duration) string {
if val > time.Second {
return val.Truncate(100 * time.Millisecond).String()
}
return val.Truncate(time.Microsecond).String()
}
func humanDurationOrNA(val time.Duration) string {
if val <= 0 {
return "N/A"
}
return humanDuration(val)
}
func humanTime(val time.Time) string {
if val.IsZero() {
return "N/A"
}
return val.Format("2006-01-02 15:04:05")
}
@ -174,11 +548,49 @@ func humanNumber[T constraints.Float | constraints.Integer](n T) string {
return str
}
func humanSI(val float64, decimals int) string {
v, unit := humanize.ComputeSI(val)
// humanNumberOrNA is like humanNumber but returns "N/A" for non-positive values.
func humanNumberOrNA[T constraints.Float | constraints.Integer](n T) string {
if n <= 0 {
return "N/A"
}
return humanNumber(n)
}
// humanFloatOrNA formats a float with 1 decimal place, returning "N/A" for non-positive values.
// This is separate from humanNumberOrNA because it provides simple decimal formatting for
// continuous metrics (averages, rates) rather than SI unit formatting used for discrete counts.
func humanFloatOrNA(val float64) string {
if val <= 0 {
return "N/A"
}
return humanFull(val, 1)
}
func humanSI[T constraints.Float | constraints.Integer](val T, decimals int) string {
v, unit := humanize.ComputeSI(float64(val))
return fmt.Sprintf("%s%s", humanFull(v, decimals), unit)
}
func humanInt[T constraints.Integer](val T) string {
return humanFull(float64(val), 0)
}
func humanFull(val float64, decimals int) string {
return humanize.CommafWithDigits(val, decimals)
}
// provideCIDSync performs a synchronous/blocking provide operation to announce
// the given CID to the DHT.
//
// - If the accelerated DHT client is used, a DHT lookup isn't needed; we
// directly allocate provider records to the closest peers.
// - If Provide.DHT.SweepEnabled=true or OptimisticProvide=true, we make an
// optimistic provide call.
// - Else we make a standard provide call (much slower).
//
// IMPORTANT: The caller MUST verify DHT availability using HasActiveDHTClient()
// before calling this function. Calling with a nil or invalid router will cause
// a panic - this is the caller's responsibility to prevent.
func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) error {
return router.Provide(ctx, c, true)
}
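
A minimal caller-side sketch of the precondition called out above, assuming it lives in the same package as provideCIDSync (which is unexported) and that the file imports github.com/ipfs/kubo/core alongside the packages already imported above. The helper name and the one-minute timeout are illustrative; the guard mirrors the one added to the routing provide handler later in this changeset.

// provideIfDHTAvailable checks DHT availability before delegating to
// provideCIDSync, and bounds the blocking call with an arbitrary timeout.
func provideIfDHTAvailable(ctx context.Context, nd *core.IpfsNode, c cid.Cid) error {
	if !nd.HasActiveDHTClient() {
		// No usable DHT client (nil, typed nil, or no-op router): skip quietly.
		return nil
	}
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	return provideCIDSync(ctx, nd.DHTClient, c)
}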

View File

@ -5,20 +5,22 @@ import (
"errors"
"fmt"
"io"
"os"
"runtime"
"strings"
"sync"
"text/tabwriter"
"time"
oldcmds "github.com/ipfs/kubo/commands"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
coreiface "github.com/ipfs/kubo/core/coreiface"
corerepo "github.com/ipfs/kubo/core/corerepo"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
"github.com/ipfs/kubo/repo/fsrepo/migrations"
humanize "github.com/dustin/go-humanize"
bstore "github.com/ipfs/boxo/blockstore"
"github.com/ipfs/boxo/path"
cid "github.com/ipfs/go-cid"
cmds "github.com/ipfs/go-ipfs-cmds"
)
@ -226,45 +228,137 @@ Version string The repo version.
},
}
// VerifyProgress reports verification progress to the user.
// It contains either a message about a corrupt block or a progress counter.
type VerifyProgress struct {
Msg string
Progress int
Msg string // Message about a corrupt/healed block (empty for valid blocks)
Progress int // Number of blocks processed so far
}
func verifyWorkerRun(ctx context.Context, wg *sync.WaitGroup, keys <-chan cid.Cid, results chan<- string, bs bstore.Blockstore) {
// verifyState represents the state of a block after verification.
// States track both the verification result and any remediation actions taken.
type verifyState int
const (
verifyStateValid verifyState = iota // Block is valid and uncorrupted
verifyStateCorrupt // Block is corrupt, no action taken
verifyStateCorruptRemoved // Block was corrupt and successfully removed
verifyStateCorruptRemoveFailed // Block was corrupt but removal failed
verifyStateCorruptHealed // Block was corrupt, removed, and successfully re-fetched
verifyStateCorruptHealFailed // Block was corrupt and removed, but re-fetching failed
)
const (
// verifyWorkerMultiplier determines worker pool size relative to CPU count.
// Since block verification is I/O-bound (disk reads + potential network fetches),
// we use more workers than CPU cores to maximize throughput.
verifyWorkerMultiplier = 2
)
// verifyResult contains the outcome of verifying a single block.
// It includes the block's CID, its verification state, and an optional
// human-readable message describing what happened.
type verifyResult struct {
cid cid.Cid // CID of the block that was verified
state verifyState // Final state after verification and any remediation
msg string // Human-readable message (empty for valid blocks)
}
// verifyWorkerRun processes CIDs from the keys channel, verifying their integrity.
// If shouldDrop is true, corrupt blocks are removed from the blockstore.
// If shouldHeal is true (implies shouldDrop), removed blocks are re-fetched from the network.
// The api parameter must be non-nil when shouldHeal is true.
// healTimeout specifies the maximum time to wait for each block heal (0 = no timeout).
func verifyWorkerRun(ctx context.Context, wg *sync.WaitGroup, keys <-chan cid.Cid, results chan<- *verifyResult, bs bstore.Blockstore, api coreiface.CoreAPI, shouldDrop, shouldHeal bool, healTimeout time.Duration) {
defer wg.Done()
sendResult := func(r *verifyResult) bool {
select {
case results <- r:
return true
case <-ctx.Done():
return false
}
}
for k := range keys {
_, err := bs.Get(ctx, k)
if err != nil {
select {
case results <- fmt.Sprintf("block %s was corrupt (%s)", k, err):
case <-ctx.Done():
return
// Block is corrupt
result := &verifyResult{cid: k, state: verifyStateCorrupt}
if !shouldDrop {
result.msg = fmt.Sprintf("block %s was corrupt (%s)", k, err)
if !sendResult(result) {
return
}
continue
}
// Try to delete
if delErr := bs.DeleteBlock(ctx, k); delErr != nil {
result.state = verifyStateCorruptRemoveFailed
result.msg = fmt.Sprintf("block %s was corrupt (%s), failed to remove (%s)", k, err, delErr)
if !sendResult(result) {
return
}
continue
}
if !shouldHeal {
result.state = verifyStateCorruptRemoved
result.msg = fmt.Sprintf("block %s was corrupt (%s), removed", k, err)
if !sendResult(result) {
return
}
continue
}
// Try to heal by re-fetching from network (api is guaranteed non-nil here)
healCtx := ctx
var healCancel context.CancelFunc
if healTimeout > 0 {
healCtx, healCancel = context.WithTimeout(ctx, healTimeout)
}
if _, healErr := api.Block().Get(healCtx, path.FromCid(k)); healErr != nil {
result.state = verifyStateCorruptHealFailed
result.msg = fmt.Sprintf("block %s was corrupt (%s), removed, failed to heal (%s)", k, err, healErr)
} else {
result.state = verifyStateCorruptHealed
result.msg = fmt.Sprintf("block %s was corrupt (%s), removed, healed", k, err)
}
if healCancel != nil {
healCancel()
}
if !sendResult(result) {
return
}
continue
}
select {
case results <- "":
case <-ctx.Done():
// Block is valid
if !sendResult(&verifyResult{cid: k, state: verifyStateValid}) {
return
}
}
}
func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blockstore) <-chan string {
results := make(chan string)
// verifyResultChan creates a channel of verification results by spawning multiple worker goroutines
// to process blocks in parallel. It returns immediately with a channel that will receive results.
func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blockstore, api coreiface.CoreAPI, shouldDrop, shouldHeal bool, healTimeout time.Duration) <-chan *verifyResult {
results := make(chan *verifyResult)
go func() {
defer close(results)
var wg sync.WaitGroup
for i := 0; i < runtime.NumCPU()*2; i++ {
for i := 0; i < runtime.NumCPU()*verifyWorkerMultiplier; i++ {
wg.Add(1)
go verifyWorkerRun(ctx, &wg, keys, results, bs)
go verifyWorkerRun(ctx, &wg, keys, results, bs, api, shouldDrop, shouldHeal, healTimeout)
}
wg.Wait()
@ -276,6 +370,45 @@ func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blocks
var repoVerifyCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Verify all blocks in repo are not corrupted.",
ShortDescription: `
'ipfs repo verify' checks integrity of all blocks in the local datastore.
Each block is read and validated against its CID to ensure data integrity.
Without any flags, this is a SAFE, read-only check that only reports corrupt
blocks without modifying the repository. This can be used as a "dry run" to
preview what --drop or --heal would do.
Use --drop to remove corrupt blocks, or --heal to remove and re-fetch from
the network.
Examples:
ipfs repo verify # safe read-only check, reports corrupt blocks
ipfs repo verify --drop # remove corrupt blocks
ipfs repo verify --heal # remove and re-fetch corrupt blocks
Exit Codes:
0: All blocks are valid, OR all corrupt blocks were successfully remediated
(with --drop or --heal)
1: Corrupt blocks detected (without flags), OR remediation failed (block
removal or healing failed with --drop or --heal)
Note: --heal requires the daemon to be running in online mode with network
connectivity to nodes that have the missing blocks. Make sure the daemon is
online and connected to other peers. Healing will attempt to re-fetch each
corrupt block from the network after removing it. If a block cannot be found
on the network, it will remain deleted.
WARNING: Both --drop and --heal are DESTRUCTIVE operations that permanently
delete corrupt blocks from your repository. Once deleted, blocks cannot be
recovered unless --heal successfully fetches them from the network. Blocks
that cannot be healed will remain permanently deleted. Always backup your
repository before using these options.
`,
},
Options: []cmds.Option{
cmds.BoolOption("drop", "Remove corrupt blocks from datastore (destructive operation)."),
cmds.BoolOption("heal", "Remove corrupt blocks and re-fetch from network (destructive operation, implies --drop)."),
cmds.StringOption("heal-timeout", "Maximum time to wait for each block heal (e.g., \"30s\"). Only applies with --heal.").WithDefault("30s"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
@ -283,6 +416,38 @@ var repoVerifyCmd = &cmds.Command{
return err
}
drop, _ := req.Options["drop"].(bool)
heal, _ := req.Options["heal"].(bool)
if heal {
drop = true // heal implies drop
}
// Parse and validate heal-timeout
timeoutStr, _ := req.Options["heal-timeout"].(string)
healTimeout, err := time.ParseDuration(timeoutStr)
if err != nil {
return fmt.Errorf("invalid heal-timeout: %w", err)
}
if healTimeout < 0 {
return errors.New("heal-timeout must be >= 0")
}
// Check online mode and API availability for healing operation
var api coreiface.CoreAPI
if heal {
if !nd.IsOnline {
return ErrNotOnline
}
api, err = cmdenv.GetApi(env, req)
if err != nil {
return err
}
if api == nil {
return fmt.Errorf("healing requested but API is not available - make sure daemon is online and connected to other peers")
}
}
bs := &bstore.ValidatingBlockstore{Blockstore: bstore.NewBlockstore(nd.Repo.Datastore())}
keys, err := bs.AllKeysChan(req.Context)
@ -291,17 +456,47 @@ var repoVerifyCmd = &cmds.Command{
return err
}
results := verifyResultChan(req.Context, keys, bs)
results := verifyResultChan(req.Context, keys, bs, api, drop, heal, healTimeout)
var fails int
// Track statistics for each type of outcome
var corrupted, removed, removeFailed, healed, healFailed int
var i int
for msg := range results {
if msg != "" {
if err := res.Emit(&VerifyProgress{Msg: msg}); err != nil {
for result := range results {
// Update counters based on the block's final state
switch result.state {
case verifyStateCorrupt:
// Block is corrupt but no action was taken (--drop not specified)
corrupted++
case verifyStateCorruptRemoved:
// Block was corrupt and successfully removed (--drop specified)
corrupted++
removed++
case verifyStateCorruptRemoveFailed:
// Block was corrupt but couldn't be removed
corrupted++
removeFailed++
case verifyStateCorruptHealed:
// Block was corrupt, removed, and successfully re-fetched (--heal specified)
corrupted++
removed++
healed++
case verifyStateCorruptHealFailed:
// Block was corrupt and removed, but re-fetching failed
corrupted++
removed++
healFailed++
default:
// verifyStateValid blocks are not counted (they're the expected case)
}
// Emit progress message for corrupt blocks
if result.state != verifyStateValid && result.msg != "" {
if err := res.Emit(&VerifyProgress{Msg: result.msg}); err != nil {
return err
}
fails++
}
i++
if err := res.Emit(&VerifyProgress{Progress: i}); err != nil {
return err
@ -312,8 +507,42 @@ var repoVerifyCmd = &cmds.Command{
return err
}
if fails != 0 {
return errors.New("verify complete, some blocks were corrupt")
if corrupted > 0 {
// Build a summary of what happened with corrupt blocks
summary := fmt.Sprintf("verify complete, %d blocks corrupt", corrupted)
if removed > 0 {
summary += fmt.Sprintf(", %d removed", removed)
}
if removeFailed > 0 {
summary += fmt.Sprintf(", %d failed to remove", removeFailed)
}
if healed > 0 {
summary += fmt.Sprintf(", %d healed", healed)
}
if healFailed > 0 {
summary += fmt.Sprintf(", %d failed to heal", healFailed)
}
// Determine success/failure based on operation mode
shouldFail := false
if !drop {
// Detection-only mode: always fail if corruption found
shouldFail = true
} else if heal {
// Heal mode: fail if any removal or heal failed
shouldFail = (removeFailed > 0 || healFailed > 0)
} else {
// Drop mode: fail if any removal failed
shouldFail = (removeFailed > 0)
}
if shouldFail {
return errors.New(summary)
}
// Success: emit summary as a message instead of error
return res.Emit(&VerifyProgress{Msg: summary})
}
return res.Emit(&VerifyProgress{Msg: "verify complete, all blocks validated."})
@ -322,7 +551,7 @@ var repoVerifyCmd = &cmds.Command{
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, obj *VerifyProgress) error {
if strings.Contains(obj.Msg, "was corrupt") {
fmt.Fprintln(os.Stdout, obj.Msg)
fmt.Fprintln(w, obj.Msg)
return nil
}

View File

@ -0,0 +1,371 @@
//go:build go1.25
package commands
// This file contains unit tests for the --heal-timeout flag functionality
// using testing/synctest to avoid waiting for real timeouts.
//
// End-to-end tests for the full 'ipfs repo verify' command (including --drop
// and --heal flags) are located in test/cli/repo_verify_test.go.
import (
"bytes"
"context"
"errors"
"io"
"sync"
"testing"
"testing/synctest"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/core/coreiface/options"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ipfs/boxo/path"
)
func TestVerifyWorkerHealTimeout(t *testing.T) {
t.Run("heal succeeds before timeout", func(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
const healTimeout = 5 * time.Second
testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
// Setup channels
keys := make(chan cid.Cid, 1)
keys <- testCID
close(keys)
results := make(chan *verifyResult, 1)
// Mock blockstore that returns error (simulating corruption)
mockBS := &mockBlockstore{
getError: errors.New("corrupt block"),
}
// Mock API where Block().Get() completes before timeout
mockAPI := &mockCoreAPI{
blockAPI: &mockBlockAPI{
getDelay: 2 * time.Second, // Less than healTimeout
data: []byte("healed data"),
},
}
var wg sync.WaitGroup
wg.Add(1)
// Run worker
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
// Advance time past the mock delay but before timeout
time.Sleep(3 * time.Second)
synctest.Wait()
wg.Wait()
close(results)
// Verify heal succeeded
result := <-results
require.NotNil(t, result)
assert.Equal(t, verifyStateCorruptHealed, result.state)
assert.Contains(t, result.msg, "healed")
})
})
t.Run("heal fails due to timeout", func(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
const healTimeout = 2 * time.Second
testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
// Setup channels
keys := make(chan cid.Cid, 1)
keys <- testCID
close(keys)
results := make(chan *verifyResult, 1)
// Mock blockstore that returns error (simulating corruption)
mockBS := &mockBlockstore{
getError: errors.New("corrupt block"),
}
// Mock API where Block().Get() takes longer than healTimeout
mockAPI := &mockCoreAPI{
blockAPI: &mockBlockAPI{
getDelay: 5 * time.Second, // More than healTimeout
data: []byte("healed data"),
},
}
var wg sync.WaitGroup
wg.Add(1)
// Run worker
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
// Advance time past timeout
time.Sleep(3 * time.Second)
synctest.Wait()
wg.Wait()
close(results)
// Verify heal failed due to timeout
result := <-results
require.NotNil(t, result)
assert.Equal(t, verifyStateCorruptHealFailed, result.state)
assert.Contains(t, result.msg, "failed to heal")
assert.Contains(t, result.msg, "context deadline exceeded")
})
})
t.Run("heal with zero timeout still attempts heal", func(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
const healTimeout = 0 // Zero timeout means no timeout
testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
// Setup channels
keys := make(chan cid.Cid, 1)
keys <- testCID
close(keys)
results := make(chan *verifyResult, 1)
// Mock blockstore that returns error (simulating corruption)
mockBS := &mockBlockstore{
getError: errors.New("corrupt block"),
}
// Mock API that succeeds quickly
mockAPI := &mockCoreAPI{
blockAPI: &mockBlockAPI{
getDelay: 100 * time.Millisecond,
data: []byte("healed data"),
},
}
var wg sync.WaitGroup
wg.Add(1)
// Run worker
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
// Advance time to let heal complete
time.Sleep(200 * time.Millisecond)
synctest.Wait()
wg.Wait()
close(results)
// Verify heal succeeded even with zero timeout
result := <-results
require.NotNil(t, result)
assert.Equal(t, verifyStateCorruptHealed, result.state)
assert.Contains(t, result.msg, "healed")
})
})
t.Run("multiple blocks with different timeout outcomes", func(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
const healTimeout = 3 * time.Second
testCID1 := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
testCID2 := cid.MustParse("bafybeihvvulpp4evxj7x7armbqcyg6uezzuig6jp3lktpbovlqfkjtgyby")
// Setup channels
keys := make(chan cid.Cid, 2)
keys <- testCID1
keys <- testCID2
close(keys)
results := make(chan *verifyResult, 2)
// Mock blockstore that always returns error (all blocks corrupt)
mockBS := &mockBlockstore{
getError: errors.New("corrupt block"),
}
// Ideally we would use two mock block APIs with different delays and
// alternate which one gets used; for simplicity, both workers share a
// single mock that succeeds within the timeout.
mockAPI := &mockCoreAPI{
blockAPI: &mockBlockAPI{
getDelay: 1 * time.Second, // Less than healTimeout - will succeed
data: []byte("healed data"),
},
}
var wg sync.WaitGroup
wg.Add(2) // Two workers
// Run two workers
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
// Advance time to let both complete
time.Sleep(2 * time.Second)
synctest.Wait()
wg.Wait()
close(results)
// Collect results
var healedCount int
for result := range results {
if result.state == verifyStateCorruptHealed {
healedCount++
}
}
// Both should heal successfully (both under timeout)
assert.Equal(t, 2, healedCount)
})
})
t.Run("valid block is not healed", func(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
const healTimeout = 5 * time.Second
testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
// Setup channels
keys := make(chan cid.Cid, 1)
keys <- testCID
close(keys)
results := make(chan *verifyResult, 1)
// Mock blockstore that returns valid block (no error)
mockBS := &mockBlockstore{
block: blocks.NewBlock([]byte("valid data")),
}
// Mock API (won't be called since block is valid)
mockAPI := &mockCoreAPI{
blockAPI: &mockBlockAPI{},
}
var wg sync.WaitGroup
wg.Add(1)
// Run worker with heal enabled
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, false, true, healTimeout)
synctest.Wait()
wg.Wait()
close(results)
// Verify block is marked valid, not healed
result := <-results
require.NotNil(t, result)
assert.Equal(t, verifyStateValid, result.state)
assert.Empty(t, result.msg)
})
})
}
// mockBlockstore implements a minimal blockstore for testing
type mockBlockstore struct {
getError error
block blocks.Block
}
func (m *mockBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
if m.getError != nil {
return nil, m.getError
}
return m.block, nil
}
func (m *mockBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error {
return nil
}
func (m *mockBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
return m.block != nil, nil
}
func (m *mockBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
if m.block != nil {
return len(m.block.RawData()), nil
}
return 0, errors.New("block not found")
}
func (m *mockBlockstore) Put(ctx context.Context, b blocks.Block) error {
return nil
}
func (m *mockBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {
return nil
}
func (m *mockBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, errors.New("not implemented")
}
func (m *mockBlockstore) HashOnRead(enabled bool) {
}
// mockBlockAPI implements BlockAPI for testing
type mockBlockAPI struct {
getDelay time.Duration
getError error
data []byte
}
func (m *mockBlockAPI) Get(ctx context.Context, p path.Path) (io.Reader, error) {
if m.getDelay > 0 {
select {
case <-time.After(m.getDelay):
// Delay completed
case <-ctx.Done():
return nil, ctx.Err()
}
}
if m.getError != nil {
return nil, m.getError
}
return bytes.NewReader(m.data), nil
}
func (m *mockBlockAPI) Put(ctx context.Context, r io.Reader, opts ...options.BlockPutOption) (coreiface.BlockStat, error) {
return nil, errors.New("not implemented")
}
func (m *mockBlockAPI) Rm(ctx context.Context, p path.Path, opts ...options.BlockRmOption) error {
return errors.New("not implemented")
}
func (m *mockBlockAPI) Stat(ctx context.Context, p path.Path) (coreiface.BlockStat, error) {
return nil, errors.New("not implemented")
}
// mockCoreAPI implements minimal CoreAPI for testing
type mockCoreAPI struct {
blockAPI *mockBlockAPI
}
func (m *mockCoreAPI) Block() coreiface.BlockAPI {
return m.blockAPI
}
func (m *mockCoreAPI) Unixfs() coreiface.UnixfsAPI { return nil }
func (m *mockCoreAPI) Dag() coreiface.APIDagService { return nil }
func (m *mockCoreAPI) Name() coreiface.NameAPI { return nil }
func (m *mockCoreAPI) Key() coreiface.KeyAPI { return nil }
func (m *mockCoreAPI) Pin() coreiface.PinAPI { return nil }
func (m *mockCoreAPI) Object() coreiface.ObjectAPI { return nil }
func (m *mockCoreAPI) Swarm() coreiface.SwarmAPI { return nil }
func (m *mockCoreAPI) PubSub() coreiface.PubSubAPI { return nil }
func (m *mockCoreAPI) Routing() coreiface.RoutingAPI { return nil }
func (m *mockCoreAPI) ResolvePath(ctx context.Context, p path.Path) (path.ImmutablePath, []string, error) {
return path.ImmutablePath{}, nil, errors.New("not implemented")
}
func (m *mockCoreAPI) ResolveNode(ctx context.Context, p path.Path) (ipld.Node, error) {
return nil, errors.New("not implemented")
}
func (m *mockCoreAPI) WithOptions(...options.ApiOption) (coreiface.CoreAPI, error) {
return nil, errors.New("not implemented")
}

View File

@ -211,6 +211,10 @@ var provideRefRoutingCmd = &cmds.Command{
ctx, events := routing.RegisterForQueryEvents(ctx)
var provideErr error
// TODO: not sure if necessary to call StartProviding for `ipfs routing
// provide <cid>`, since either cid is already being provided, or it will
// be garbage collected and not reprovided anyway. So we may simply stick
// with a single (optimistic) provide, and skip StartProviding call.
go func() {
defer cancel()
if rec {
@ -226,6 +230,16 @@ var provideRefRoutingCmd = &cmds.Command{
}
}()
if nd.HasActiveDHTClient() {
// If the node has an active DHT client, immediately provide the supplied
// CIDs before returning.
for _, c := range cids {
if err = provideCIDSync(req.Context, nd.DHTClient, c); err != nil {
return fmt.Errorf("error providing cid: %w", err)
}
}
}
for e := range events {
if err := res.Emit(e); err != nil {
return err
@ -300,6 +314,7 @@ func provideCids(prov node.DHTProvider, cids []cid.Cid) error {
for i, c := range cids {
mhs[i] = c.Hash()
}
// providing happens asynchronously
return prov.StartProviding(true, mhs...)
}

View File

@ -75,7 +75,8 @@ This interface is not stable and may change from release to release.
var dht *dht.IpfsDHT
var separateClient bool
if nd.DHTClient != nd.DHT {
// Check if using separate DHT client (e.g., accelerated DHT)
if nd.HasActiveDHTClient() && nd.DHTClient != nd.DHT {
separateClient = true
}

View File

@ -255,7 +255,7 @@ func DetectNewKuboVersion(nd *core.IpfsNode, minPercent int64) (VersionCheckOutp
}
// Amino DHT client keeps information about previously seen peers
if nd.DHTClient != nd.DHT && nd.DHTClient != nil {
if nd.HasActiveDHTClient() && nd.DHTClient != nd.DHT {
client, ok := nd.DHTClient.(*fullrt.FullRT)
if !ok {
return VersionCheckOutput{}, errors.New("could not perform version check due to missing or incompatible DHT configuration")

View File

@ -30,9 +30,11 @@ import (
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
ddht "github.com/libp2p/go-libp2p-kad-dht/dual"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
pubsub "github.com/libp2p/go-libp2p-pubsub"
psrouter "github.com/libp2p/go-libp2p-pubsub-router"
record "github.com/libp2p/go-libp2p-record"
routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
connmgr "github.com/libp2p/go-libp2p/core/connmgr"
ic "github.com/libp2p/go-libp2p/core/crypto"
p2phost "github.com/libp2p/go-libp2p/core/host"
@ -143,6 +145,42 @@ func (n *IpfsNode) Close() error {
return n.stop()
}
// HasActiveDHTClient checks if the node's DHT client is active and usable for DHT operations.
//
// Returns false for:
// - nil DHTClient
// - typed nil pointers (e.g., (*ddht.DHT)(nil))
// - no-op routers (routinghelpers.Null)
//
// Note: This method only checks for known DHT client types (ddht.DHT, fullrt.FullRT).
// Custom routing.Routing implementations are not explicitly validated.
//
// This method prevents the "typed nil interface" bug where an interface contains
// a nil pointer of a concrete type, which passes nil checks but panics when methods
// are called.
func (n *IpfsNode) HasActiveDHTClient() bool {
if n.DHTClient == nil {
return false
}
// Check for no-op router (Routing.Type=none)
if _, ok := n.DHTClient.(routinghelpers.Null); ok {
return false
}
// Check for typed nil *ddht.DHT (common when Routing.Type=delegated or HTTP-only)
if d, ok := n.DHTClient.(*ddht.DHT); ok && d == nil {
return false
}
// Check for typed nil *fullrt.FullRT (accelerated DHT client)
if f, ok := n.DHTClient.(*fullrt.FullRT); ok && f == nil {
return false
}
return true
}
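
A standalone sketch of the typed-nil interface pitfall described in the doc comment above; the package clause and variable names are hypothetical, and the import paths match those used elsewhere in this changeset.

package main

import (
	"fmt"

	ddht "github.com/libp2p/go-libp2p-kad-dht/dual"
	"github.com/libp2p/go-libp2p/core/routing"
)

func main() {
	var d *ddht.DHT           // nil pointer of a concrete type
	var r routing.Routing = d // the interface now holds (type=*ddht.DHT, value=nil)
	fmt.Println(r == nil)     // prints false: a plain nil check does not catch this
	// Any method call on r that uses the receiver would panic on the nil pointer,
	// which is why HasActiveDHTClient type-asserts and checks the pointer itself.
}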
// Context returns the IpfsNode context
func (n *IpfsNode) Context() context.Context {
if n.ctx == nil {

View File

@ -1,15 +1,28 @@
package core
import (
"os"
"path/filepath"
"testing"
context "context"
"github.com/ipfs/kubo/repo"
"github.com/ipfs/boxo/filestore"
"github.com/ipfs/boxo/keystore"
datastore "github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-datastore/sync"
config "github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/node/libp2p"
golib "github.com/libp2p/go-libp2p"
ddht "github.com/libp2p/go-libp2p-kad-dht/dual"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
pstore "github.com/libp2p/go-libp2p/core/peerstore"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)
func TestInitialization(t *testing.T) {
@ -65,3 +78,151 @@ var testIdentity = config.Identity{
PeerID: "QmNgdzLieYi8tgfo2WfTUzNVH5hQK9oAYGVf6dxN12NrHt",
PrivKey: "CAASrRIwggkpAgEAAoICAQCwt67GTUQ8nlJhks6CgbLKOx7F5tl1r9zF4m3TUrG3Pe8h64vi+ILDRFd7QJxaJ/n8ux9RUDoxLjzftL4uTdtv5UXl2vaufCc/C0bhCRvDhuWPhVsD75/DZPbwLsepxocwVWTyq7/ZHsCfuWdoh/KNczfy+Gn33gVQbHCnip/uhTVxT7ARTiv8Qa3d7qmmxsR+1zdL/IRO0mic/iojcb3Oc/PRnYBTiAZFbZdUEit/99tnfSjMDg02wRayZaT5ikxa6gBTMZ16Yvienq7RwSELzMQq2jFA4i/TdiGhS9uKywltiN2LrNDBcQJSN02pK12DKoiIy+wuOCRgs2NTQEhU2sXCk091v7giTTOpFX2ij9ghmiRfoSiBFPJA5RGwiH6ansCHtWKY1K8BS5UORM0o3dYk87mTnKbCsdz4bYnGtOWafujYwzueGx8r+IWiys80IPQKDeehnLW6RgoyjszKgL/2XTyP54xMLSW+Qb3BPgDcPaPO0hmop1hW9upStxKsefW2A2d46Ds4HEpJEry7PkS5M4gKL/zCKHuxuXVk14+fZQ1rstMuvKjrekpAC2aVIKMI9VRA3awtnje8HImQMdj+r+bPmv0N8rTTr3eS4J8Yl7k12i95LLfK+fWnmUh22oTNzkRlaiERQrUDyE4XNCtJc0xs1oe1yXGqazCIAQIDAQABAoICAQCk1N/ftahlRmOfAXk//8wNl7FvdJD3le6+YSKBj0uWmN1ZbUSQk64chr12iGCOM2WY180xYjy1LOS44PTXaeW5bEiTSnb3b3SH+HPHaWCNM2EiSogHltYVQjKW+3tfH39vlOdQ9uQ+l9Gh6iTLOqsCRyszpYPqIBwi1NMLY2Ej8PpVU7ftnFWouHZ9YKS7nAEiMoowhTu/7cCIVwZlAy3AySTuKxPMVj9LORqC32PVvBHZaMPJ+X1Xyijqg6aq39WyoztkXg3+Xxx5j5eOrK6vO/Lp6ZUxaQilHDXoJkKEJjgIBDZpluss08UPfOgiWAGkW+L4fgUxY0qDLDAEMhyEBAn6KOKVL1JhGTX6GjhWziI94bddSpHKYOEIDzUy4H8BXnKhtnyQV6ELS65C2hj9D0IMBTj7edCF1poJy0QfdK0cuXgMvxHLeUO5uc2YWfbNosvKxqygB9rToy4b22YvNwsZUXsTY6Jt+p9V2OgXSKfB5VPeRbjTJL6xqvvUJpQytmII/C9JmSDUtCbYceHj6X9jgigLk20VV6nWHqCTj3utXD6NPAjoycVpLKDlnWEgfVELDIk0gobxUqqSm3jTPEKRPJgxkgPxbwxYumtw++1UY2y35w3WRDc2xYPaWKBCQeZy+mL6ByXp9bWlNvxS3Knb6oZp36/ovGnf2pGvdQKCAQEAyKpipz2lIUySDyE0avVWAmQb2tWGKXALPohzj7AwkcfEg2GuwoC6GyVE2sTJD1HRazIjOKn3yQORg2uOPeG7sx7EKHxSxCKDrbPawkvLCq8JYSy9TLvhqKUVVGYPqMBzu2POSLEA81QXas+aYjKOFWA2Zrjq26zV9ey3+6Lc6WULePgRQybU8+RHJc6fdjUCCfUxgOrUO2IQOuTJ+FsDpVnrMUGlokmWn23OjL4qTL9wGDnWGUs2pjSzNbj3qA0d8iqaiMUyHX/D/VS0wpeT1osNBSm8suvSibYBn+7wbIApbwXUxZaxMv2OHGz3empae4ckvNZs7r8wsI9UwFt8mwKCAQEA4XK6gZkv9t+3YCcSPw2ensLvL/xU7i2bkC9tfTGdjnQfzZXIf5KNdVuj/SerOl2S1s45NMs3ysJbADwRb4ahElD/V71nGzV8fpFTitC20ro9fuX4J0+twmBolHqeH9pmeGTjAeL1rvt6vxs4FkeG/yNft7GdXpXTtEGaObn8Mt0tPY+aB3UnKrnCQoQAlPyGHFrVRX0UEcp6wyyNGhJCNKeNOvqCHTFObhbhO+KWpWSN0MkVHnqaIBnIn1Te8FtvP/iTwXGnKc0YXJUG6+LM6LmOguW6tg8ZqiQeYyyR+e9eCFH4csLzkrTl1GxCxwEsoSLIMm7UDcjttW6tYEghkwKCAQEAmeCO5lCPYImnN5Lu71ZTLmI2OgmjaANTnBBnDbi+hgv61gUCToUIMejSdDCTPfwv61P3TmyIZs0luPGxkiKYHTNqmOE9Vspgz8Mr7fLRMNApESuNvloVIY32XVImj/GEzh4rAfM6F15U1sN8T/EUo6+0B/Glp+9R49QzAfRSE2g48/rGwgf1JVHYfVWFUtAzUA+GdqWdOixo5cCsYJbqpNHfWVZN/bUQnBFIYwUwysnC29D+LUdQEQQ4qOm+gFAOtrWU62zMkXJ4iLt8Ify6kbrvsRXgbhQIzzGS7WH9XDarj0eZciuslr15TLMC1Azadf+cXHLR9gMHA13mT9vYIQKCAQA/DjGv8cKCkAvf7s2hqROGYAs6Jp8yhrsN1tYOwAPLRhtnCs+rLrg17M2vDptLlcRuI/vIElamdTmylRpjUQpX7yObzLO73nfVhpwRJVMdGU394iBIDncQ+JoHfUwgqJskbUM40dvZdyjbrqc/Q/4z+hbZb+oN/GXb8sVKBATPzSDMKQ/xqgisYIw+wmDPStnPsHAaIWOtni47zIgilJzD0WEk78/YjmPbUrboYvWziK5JiRRJFA1rkQqV1c0M+OXixIm+/yS8AksgCeaHr0WUieGcJtjT9uE8vyFop5ykhRiNxy9wGaq6i7IEecsrkd6DqxDHWkwhFuO1bSE83q/VAoIBAEA+RX1i/SUi08p71ggUi9WFMqXmzELp1L3hiEjOc2AklHk2rPxsaTh9+G95BvjhP7fRa/Yga+yDtYuyjO99nedStdNNSg03aPXILl9gs3r2dPiQKUEXZJ3FrH6tkils/8BlpOIRfbkszrdZIKTO9GCdLWQ30dQITDACs8zV/1GFGrHFrqnnMe/NpIFHWNZJ0/WZMi8wgWO6Ik8jHEpQtVXRiXLqy7U6hk170pa4GHOzvftfPElOZZjy9qn7KjdAQqy6spIrAE94OEL+fBgbHQZGLpuTlj6w6YGbMtPU8uo7sXKoc6WOCb68JWft3tejGLDa1946HAWqVM9B/UcneNc=",
}
// mockHostOption creates a HostOption that uses the provided mocknet.
// Inlined to avoid import cycle with core/mock package.
func mockHostOption(mn mocknet.Mocknet) libp2p.HostOption {
return func(id peer.ID, ps pstore.Peerstore, opts ...golib.Option) (host.Host, error) {
var cfg golib.Config
if err := cfg.Apply(opts...); err != nil {
return nil, err
}
// The mocknet does not use the provided libp2p options. These options include
// the listening addresses we want our peer to listen on. Therefore, we have
// to parse the configuration manually and add them here.
ps.AddAddrs(id, cfg.ListenAddrs, pstore.PermanentAddrTTL)
return mn.AddPeerWithPeerstore(id, ps)
}
}
func TestHasActiveDHTClient(t *testing.T) {
// Test 1: nil DHTClient
t.Run("nil DHTClient", func(t *testing.T) {
node := &IpfsNode{
DHTClient: nil,
}
if node.HasActiveDHTClient() {
t.Error("Expected false for nil DHTClient")
}
})
// Test 2: Typed nil *ddht.DHT (common case when Routing.Type=delegated)
t.Run("typed nil ddht.DHT", func(t *testing.T) {
node := &IpfsNode{
DHTClient: (*ddht.DHT)(nil),
}
if node.HasActiveDHTClient() {
t.Error("Expected false for typed nil *ddht.DHT")
}
})
// Test 3: Typed nil *fullrt.FullRT (accelerated DHT client)
t.Run("typed nil fullrt.FullRT", func(t *testing.T) {
node := &IpfsNode{
DHTClient: (*fullrt.FullRT)(nil),
}
if node.HasActiveDHTClient() {
t.Error("Expected false for typed nil *fullrt.FullRT")
}
})
// Test 4: routinghelpers.Null no-op router (Routing.Type=none)
t.Run("routinghelpers.Null", func(t *testing.T) {
node := &IpfsNode{
DHTClient: routinghelpers.Null{},
}
if node.HasActiveDHTClient() {
t.Error("Expected false for routinghelpers.Null")
}
})
// Test 5: Valid standard dual DHT (Routing.Type=auto/dht/dhtclient)
t.Run("valid standard dual DHT", func(t *testing.T) {
ctx := context.Background()
mn := mocknet.New()
defer mn.Close()
ds := syncds.MutexWrap(datastore.NewMapDatastore())
c := config.Config{}
c.Identity = testIdentity
c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
r := &repo.Mock{
C: c,
D: ds,
K: keystore.NewMemKeystore(),
F: filestore.NewFileManager(ds, filepath.Dir(os.TempDir())),
}
node, err := NewNode(ctx, &BuildCfg{
Routing: libp2p.DHTServerOption,
Repo: r,
Host: mockHostOption(mn),
Online: true,
})
if err != nil {
t.Fatalf("Failed to create node with DHT: %v", err)
}
defer node.Close()
// First verify test setup created the expected DHT type
if node.DHTClient == nil {
t.Fatalf("Test setup failed: DHTClient is nil")
}
if _, ok := node.DHTClient.(*ddht.DHT); !ok {
t.Fatalf("Test setup failed: expected DHTClient to be *ddht.DHT, got %T", node.DHTClient)
}
// Now verify HasActiveDHTClient() correctly identifies it as active
if !node.HasActiveDHTClient() {
t.Error("Expected true for valid dual DHT client")
}
})
// Test 6: Valid accelerated DHT client (Routing.Type=autoclient)
t.Run("valid accelerated DHT client", func(t *testing.T) {
ctx := context.Background()
mn := mocknet.New()
defer mn.Close()
ds := syncds.MutexWrap(datastore.NewMapDatastore())
c := config.Config{}
c.Identity = testIdentity
c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
c.Routing.AcceleratedDHTClient = config.True
r := &repo.Mock{
C: c,
D: ds,
K: keystore.NewMemKeystore(),
F: filestore.NewFileManager(ds, filepath.Dir(os.TempDir())),
}
node, err := NewNode(ctx, &BuildCfg{
Routing: libp2p.DHTOption,
Repo: r,
Host: mockHostOption(mn),
Online: true,
})
if err != nil {
t.Fatalf("Failed to create node with accelerated DHT: %v", err)
}
defer node.Close()
// First verify test setup created the expected accelerated DHT type
if node.DHTClient == nil {
t.Fatalf("Test setup failed: DHTClient is nil")
}
if _, ok := node.DHTClient.(*fullrt.FullRT); !ok {
t.Fatalf("Test setup failed: expected DHTClient to be *fullrt.FullRT, got %T", node.DHTClient)
}
// Now verify HasActiveDHTClient() correctly identifies it as active
if !node.HasActiveDHTClient() {
t.Error("Expected true for valid accelerated DHT client")
}
})
}

View File

@ -111,9 +111,10 @@ func Libp2pGatewayOption() ServeOption {
PublicGateways: nil,
Menu: nil,
// Apply timeout and concurrency limits from user config
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))),
DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true
}
handler := gateway.NewHandler(gwConfig, &offlineGatewayErrWrapper{gwimpl: backend})
@ -266,13 +267,14 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
// Initialize gateway configuration, with empty PublicGateways, handled after.
gwCfg := gateway.Config{
DeserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses),
DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors),
NoDNSLink: cfg.Gateway.NoDNSLink,
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL),
DeserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses),
DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors),
NoDNSLink: cfg.Gateway.NoDNSLink,
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))),
DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL),
}
// Add default implicit known gateways, such as subdomain gateway on localhost.

View File

@ -12,11 +12,12 @@ import (
)
// WebUI version confirmed to work with this Kubo version
const WebUIPath = "/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u" // v4.9.1
const WebUIPath = "/ipfs/bafybeidsjptidvb6wf6benznq2pxgnt5iyksgtecpmjoimlmswhtx2u5ua" // v4.10.0
// WebUIPaths is a list of all past webUI paths.
var WebUIPaths = []string{
WebUIPath,
"/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u", // v4.9.1
"/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu", // v4.8.0
"/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0
"/ipfs/bafybeibpaa5kqrj4gkemiswbwndjqiryl65cks64ypwtyerxixu56gnvvm", // v4.6.0

View File

@ -240,14 +240,27 @@ func (tp *TestSuite) TestRoutingProvide(t *testing.T) {
t.Fatal(err)
}
out, err = apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1))
if err != nil {
t.Fatal(err)
maxAttempts := 5
success := false
for range maxAttempts {
// We may need to try again as Provide() doesn't block until the CID is
// actually provided.
out, err = apis[2].Routing().FindProviders(ctx, p, options.Routing.NumProviders(1))
if err != nil {
t.Fatal(err)
}
provider := <-out
if provider.ID.String() == self0.ID().String() {
success = true
break
}
if len(provider.ID.String()) > 0 {
t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String())
}
time.Sleep(time.Second)
}
provider := <-out
if provider.ID.String() != self0.ID().String() {
t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String())
if !success {
t.Errorf("missing provider after %d attempts", maxAttempts)
}
}

View File

@ -8,7 +8,6 @@ import (
"strings"
"time"
"github.com/dustin/go-humanize"
blockstore "github.com/ipfs/boxo/blockstore"
offline "github.com/ipfs/boxo/exchange/offline"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
@ -423,7 +422,10 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
logger.Fatal(msg) // conflicting values, hard fail
}
logger.Error(msg)
cfg.Import.UnixFSHAMTDirectorySizeThreshold = *cfg.Internal.UnixFSShardingSizeThreshold
// Migrate the old OptionalString value to the new OptionalBytes field.
// Since OptionalBytes embeds OptionalString, we can construct it directly
// with the old value, preserving the user's original string (e.g., "256KiB").
cfg.Import.UnixFSHAMTDirectorySizeThreshold = config.OptionalBytes{OptionalString: *cfg.Internal.UnixFSShardingSizeThreshold}
}
// Validate Import configuration
@ -437,11 +439,7 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
}
// Auto-sharding settings
shardingThresholdString := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardSingThresholdInt, err := humanize.ParseBytes(shardingThresholdString)
if err != nil {
return fx.Error(err)
}
shardSingThresholdInt := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardMaxFanout := cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout)
// TODO: avoid overriding this globally, see if we can extend Directory interface like Get/SetMaxLinks from https://github.com/ipfs/boxo/pull/906
uio.HAMTShardingSize = int(shardSingThresholdInt)

View File

@ -55,12 +55,24 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (out P2PHo
return out, err
}
// Optimistic provide is enabled either via the dedicated experimental flag, or when DHT Provide Sweep is enabled.
// When DHT Provide Sweep is enabled, all provide operations go through the
// `SweepingProvider`, hence the provides don't use the optimistic provide
// logic. Provides use `SweepingProvider.StartProviding()` and not
// `IpfsDHT.Provide()`, which is where the optimistic provide logic is
// implemented. However, `IpfsDHT.Provide()` is used to quickly provide roots
// when the user manually adds content with the `--fast-provide-root` flag enabled. In
// this case we want to use optimistic provide logic to quickly announce the
// content to the network. This should be the only use case of
// `IpfsDHT.Provide()` when DHT Provide Sweep is enabled.
optimisticProvide := cfg.Experimental.OptimisticProvide || cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled)
routingOptArgs := RoutingOptionArgs{
Ctx: ctx,
Datastore: params.Repo.Datastore(),
Validator: params.Validator,
BootstrapPeers: bootstrappers,
OptimisticProvide: cfg.Experimental.OptimisticProvide,
OptimisticProvide: optimisticProvide,
OptimisticProvideJobsPoolSize: cfg.Experimental.OptimisticProvideJobsPoolSize,
LoopbackAddressesOnLanDHT: cfg.Routing.LoopbackAddressesOnLanDHT.WithDefault(config.DefaultLoopbackAddressesOnLanDHT),
}

View File

@ -19,12 +19,8 @@ var infiniteResourceLimits = rcmgr.InfiniteLimits.ToPartialLimitConfig().System
// The defaults follow the documentation in docs/libp2p-resource-management.md.
// Any changes in the logic here should be reflected there.
func createDefaultLimitConfig(cfg config.SwarmConfig) (limitConfig rcmgr.ConcreteLimitConfig, logMessageForStartup string, err error) {
maxMemoryDefaultString := humanize.Bytes(uint64(memory.TotalMemory()) / 2)
maxMemoryString := cfg.ResourceMgr.MaxMemory.WithDefault(maxMemoryDefaultString)
maxMemory, err := humanize.ParseBytes(maxMemoryString)
if err != nil {
return rcmgr.ConcreteLimitConfig{}, "", err
}
maxMemoryDefault := uint64(memory.TotalMemory()) / 2
maxMemory := cfg.ResourceMgr.MaxMemory.WithDefault(maxMemoryDefault)
maxMemoryMB := maxMemory / (1024 * 1024)
maxFD := int(cfg.ResourceMgr.MaxFileDescriptors.WithDefault(int64(fd.GetNumFDs()) / 2))
@ -142,7 +138,7 @@ Computed default go-libp2p Resource Manager limits based on:
These can be inspected with 'ipfs swarm resources'.
`, maxMemoryString, maxFD)
`, humanize.Bytes(maxMemory), maxFD)
// We already have a complete value thus pass in an empty ConcreteLimitConfig.
return partialLimits.Build(rcmgr.ConcreteLimitConfig{}), msg, nil

View File

@ -14,6 +14,7 @@ import (
"github.com/ipfs/boxo/provider"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
"github.com/ipfs/go-datastore/query"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/repo"
@ -36,13 +37,30 @@ import (
"go.uber.org/fx"
)
// The size of a batch that will be used for calculating average announcement
// time per CID, inside of boxo/provider.ThroughputReport
// and in 'ipfs stats provide' report.
const sampledBatchSize = 1000
const (
// The size of a batch that will be used for calculating average announcement
// time per CID, inside of boxo/provider.ThroughputReport
// and in 'ipfs stats provide' report.
// Used when Provide.DHT.SweepEnabled=false
sampledBatchSize = 1000
// Datastore key used to store previous reprovide strategy.
const reprovideStrategyKey = "/reprovideStrategy"
// Datastore key used to store previous reprovide strategy.
reprovideStrategyKey = "/reprovideStrategy"
// Datastore namespace prefix for provider data.
providerDatastorePrefix = "provider"
// Datastore path for the provider keystore.
keystoreDatastorePath = "keystore"
)
// Interval between reprovide queue monitoring checks for slow reprovide alerts.
// Used when Provide.DHT.SweepEnabled=true
const reprovideAlertPollInterval = 15 * time.Minute
// Number of consecutive polling intervals with sustained queue growth before
// triggering a slow reprovide alert (3 intervals = 45 minutes).
// Used when Provide.DHT.SweepEnabled=true
const consecutiveAlertsThreshold = 3
// DHTProvider is an interface for providing keys to a DHT swarm. It holds a
// state of keys to be advertised, and is responsible for periodically
@ -98,6 +116,7 @@ type DHTProvider interface {
// `OfflineDelay`). The schedule depends on the network size, hence recent
// network connectivity is essential.
RefreshSchedule() error
Close() error
}
var (
@ -116,6 +135,7 @@ func (r *NoopProvider) StartProviding(bool, ...mh.Multihash) error { return nil
func (r *NoopProvider) ProvideOnce(...mh.Multihash) error { return nil }
func (r *NoopProvider) Clear() int { return 0 }
func (r *NoopProvider) RefreshSchedule() error { return nil }
func (r *NoopProvider) Close() error { return nil }
// LegacyProvider is a wrapper around the boxo/provider.System that implements
// the DHTProvider interface. This provider manages reprovides using a burst
@ -314,10 +334,10 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
Repo repo.Repo
}
sweepingReprovider := fx.Provide(func(in providerInput) (DHTProvider, *keystore.ResettableKeystore, error) {
ds := in.Repo.Datastore()
ds := namespace.Wrap(in.Repo.Datastore(), datastore.NewKey(providerDatastorePrefix))
ks, err := keystore.NewResettableKeystore(ds,
keystore.WithPrefixBits(16),
keystore.WithDatastorePath("/provider/keystore"),
keystore.WithDatastorePath(keystoreDatastorePath),
keystore.WithBatchSize(int(cfg.Provide.DHT.KeystoreBatchSize.WithDefault(config.DefaultProvideDHTKeystoreBatchSize))),
)
if err != nil {
@ -360,6 +380,8 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
if inDht != nil {
prov, err := ddhtprovider.New(inDht,
ddhtprovider.WithKeystore(ks),
ddhtprovider.WithDatastore(ds),
ddhtprovider.WithResumeCycle(cfg.Provide.DHT.ResumeEnabled.WithDefault(config.DefaultProvideDHTResumeEnabled)),
ddhtprovider.WithReprovideInterval(reprovideInterval),
ddhtprovider.WithMaxReprovideDelay(time.Hour),
@ -393,7 +415,9 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
}
opts := []dhtprovider.Option{
dhtprovider.WithKeystore(ks),
dhtprovider.WithPeerID(impl.Host().ID()),
dhtprovider.WithDatastore(ds),
dhtprovider.WithResumeCycle(cfg.Provide.DHT.ResumeEnabled.WithDefault(config.DefaultProvideDHTResumeEnabled)),
dhtprovider.WithHost(impl.Host()),
dhtprovider.WithRouter(impl),
dhtprovider.WithMessageSender(impl.MessageSender()),
dhtprovider.WithSelfAddrs(selfAddrsFunc),
@ -501,16 +525,168 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
case <-ctx.Done():
return ctx.Err()
}
// Keystore data isn't purged, on close, but it will be overwritten
// when the node starts again.
// Keystore will be closed by ensureProviderClosesBeforeKeystore hook
// to guarantee provider closes before keystore.
return nil
},
})
})
// ensureProviderClosesBeforeKeystore manages the shutdown order between
// provider and keystore to prevent race conditions.
//
// The provider's worker goroutines may call keystore methods during their
// operation. If the keystore closes while these operations are in-flight, we get
// "keystore is closed" errors. By closing the provider first, we ensure all
// worker goroutines exit and complete any pending keystore operations before
// the keystore itself closes.
type providerKeystoreShutdownInput struct {
fx.In
Provider DHTProvider
Keystore *keystore.ResettableKeystore
}
ensureProviderClosesBeforeKeystore := fx.Invoke(func(lc fx.Lifecycle, in providerKeystoreShutdownInput) {
// Skip for NoopProvider
if _, ok := in.Provider.(*NoopProvider); ok {
return
}
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
// Close provider first - waits for all worker goroutines to exit.
// This ensures no code can access keystore after this returns.
if err := in.Provider.Close(); err != nil {
logger.Errorw("error closing provider during shutdown", "error", err)
}
// Close keystore - safe now, provider is fully shut down
return in.Keystore.Close()
},
})
})
// extractSweepingProvider extracts a SweepingProvider from the given provider interface.
// It handles unwrapping buffered and dual providers, always selecting WAN for dual DHT.
// Returns nil if the provider is not a sweeping provider type.
var extractSweepingProvider func(prov any) *dhtprovider.SweepingProvider
extractSweepingProvider = func(prov any) *dhtprovider.SweepingProvider {
switch p := prov.(type) {
case *dhtprovider.SweepingProvider:
return p
case *ddhtprovider.SweepingProvider:
return p.WAN
case *buffered.SweepingProvider:
// Recursively extract from the inner provider
return extractSweepingProvider(p.Provider)
default:
return nil
}
}
type alertInput struct {
fx.In
Provider DHTProvider
}
reprovideAlert := fx.Invoke(func(lc fx.Lifecycle, in alertInput) {
prov := extractSweepingProvider(in.Provider)
if prov == nil {
return
}
var (
cancel context.CancelFunc
done = make(chan struct{})
)
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
gcCtx, c := context.WithCancel(context.Background())
cancel = c
go func() {
defer close(done)
ticker := time.NewTicker(reprovideAlertPollInterval)
defer ticker.Stop()
var (
queueSize, prevQueueSize int64
queuedWorkers, prevQueuedWorkers bool
count int
)
for {
select {
case <-gcCtx.Done():
return
case <-ticker.C:
}
stats := prov.Stats()
queuedWorkers = stats.Workers.QueuedPeriodic > 0
queueSize = int64(stats.Queues.PendingRegionReprovides)
// Alert if reprovide queue keeps growing and all periodic workers are busy.
// Requires consecutiveAlertsThreshold intervals of sustained growth.
if prevQueuedWorkers && queuedWorkers && queueSize > prevQueueSize {
count++
if count >= consecutiveAlertsThreshold {
logger.Errorf(`
🔔🔔🔔 Reprovide Operations Too Slow 🔔🔔🔔
Your node is falling behind on DHT reprovides, which will affect content availability.
Keyspace regions enqueued for reprovide:
%s ago:\t%d
Now:\t%d
All periodic workers are busy!
Active workers:\t%d / %d (max)
Active workers types:\t%d periodic, %d burst
Dedicated workers:\t%d periodic, %d burst
Solutions (try in order):
1. Increase Provide.DHT.MaxWorkers (current %d)
2. Increase Provide.DHT.DedicatedPeriodicWorkers (current %d)
3. Set Provide.DHT.SweepEnabled=false and Routing.AcceleratedDHTClient=true (last resort, not recommended)
See how the reprovide queue is processed in real-time with 'watch ipfs provide stat --all --compact'
See docs: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxworkers`,
reprovideAlertPollInterval.Truncate(time.Minute).String(), prevQueueSize, queueSize,
stats.Workers.Active, stats.Workers.Max,
stats.Workers.ActivePeriodic, stats.Workers.ActiveBurst,
stats.Workers.DedicatedPeriodic, stats.Workers.DedicatedBurst,
stats.Workers.Max, stats.Workers.DedicatedPeriodic)
}
} else if !queuedWorkers {
count = 0
}
prevQueueSize, prevQueuedWorkers = queueSize, queuedWorkers
}
}()
return nil
},
OnStop: func(ctx context.Context) error {
// Cancel the alert loop
if cancel != nil {
cancel()
}
select {
case <-done:
case <-ctx.Done():
return ctx.Err()
}
return nil
},
})
})
return fx.Options(
sweepingReprovider,
initKeystore,
ensureProviderClosesBeforeKeystore,
reprovideAlert,
)
}

View File

@ -1,4 +1,4 @@
<!-- Last updated during [v0.37.0 release](https://github.com/ipfs/kubo/issues/10867) -->
<!-- Last updated during [v0.38.0 release](https://github.com/ipfs/kubo/issues/10884) -->
# ✅ Release Checklist (vX.Y.Z[-rcN])
@ -80,18 +80,18 @@ If you're making a release for the first time, do pair programming and have the
- [ ] Update [ipshipyard/waterworks-infra](https://github.com/ipshipyard/waterworks-infra)
- [ ] Update Kubo staging environment ([Running Kubo tests on staging](https://www.notion.so/Running-Kubo-tests-on-staging-488578bb46154f9bad982e4205621af8))
- [ ] **RC:** Test last release against current RC
- [ ] **FINAL:** Test last release against current one
- [ ] Update collab cluster boxes to the tagged release
- [ ] Update libp2p bootstrappers to the tagged release
- [ ] **FINAL:** Latest release on both boxes
- [ ] **FINAL:** Update collab cluster boxes to the tagged release
- [ ] **FINAL:** Update libp2p bootstrappers to the tagged release
- [ ] Smoke test with [IPFS Companion Browser Extension](https://docs.ipfs.tech/install/ipfs-companion/)
- [ ] Update [ipfs-desktop](https://github.com/ipfs/ipfs-desktop)
- [ ] Create PR updating kubo version in `package.json` and `package-lock.json`
- [ ] **FINAL only:** Merge and create/request new release
- [ ] **FINAL:** Merge PR and ship new ipfs-desktop release
- [ ] **FINAL only:** Update [docs.ipfs.tech](https://docs.ipfs.tech/): run [update-on-new-ipfs-tag.yml](https://github.com/ipfs/ipfs-docs/actions/workflows/update-on-new-ipfs-tag.yml) workflow and merge the PR
### Promotion
- [ ] Create [IPFS Discourse](https://discuss.ipfs.tech) topic ([RC example](https://discuss.ipfs.tech/t/kubo-v0-16-0-rc1-release-candidate-is-out/15248), [FINAL example](https://discuss.ipfs.tech/t/kubo-v0-37-0-is-out/19673))
- [ ] Create [IPFS Discourse](https://discuss.ipfs.tech) topic ([RC example](https://discuss.ipfs.tech/t/kubo-v0-38-0-rc2-is-out/19772), [FINAL example](https://discuss.ipfs.tech/t/kubo-v0-38-0-is-out/19795))
- [ ] Title: `Kubo vX.Y.Z(-rcN) is out!`, tag: `kubo`
- [ ] Use title as heading (`##`) in description
- [ ] Include: GitHub release link, IPNS binaries, docker pull command, release notes

View File

@ -59,6 +59,9 @@ A new experimental DHT provider is available as an alternative to both the defau
**Monitoring and debugging:** Legacy mode (`SweepEnabled=false`) tracks `provider_reprovider_provide_count` and `provider_reprovider_reprovide_count`, while sweep mode (`SweepEnabled=true`) tracks `total_provide_count_total`. Enable debug logging with `GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug` to see detailed logs from either system.
> [!IMPORTANT]
> The metric `total_provide_count_total` was renamed to `provider_provides_total` in Kubo v0.39 to follow OpenTelemetry naming conventions. If you have dashboards or alerts monitoring this metric, update them accordingly.
> [!NOTE]
> This feature is experimental and opt-in. In the future, it will become the default and replace the legacy system. Some commands like `ipfs stats provide` and `ipfs routing provide` are not yet available with sweep mode. Run `ipfs provide --help` for alternatives.
@ -68,6 +71,9 @@ For configuration details, see [`Provide.DHT`](https://github.com/ipfs/kubo/blob
Kubo now exposes DHT metrics from [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/), including `total_provide_count_total` for sweep provider operations and RPC metrics prefixed with `rpc_inbound_` and `rpc_outbound_` for DHT message traffic. See [Kubo metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md) for details.
> [!IMPORTANT]
> The metric `total_provide_count_total` was renamed to `provider_provides_total` in Kubo v0.39 to follow OpenTelemetry naming conventions. If you have dashboards or alerts monitoring this metric, update them accordingly.
#### 🚨 Improved gateway error pages with diagnostic tools
Gateway error pages now provide more actionable information during content retrieval failures. When a 504 Gateway Timeout occurs, users see detailed retrieval state information including which phase failed and a sample of providers that were attempted:

363
docs/changelogs/v0.39.md Normal file
View File

@ -0,0 +1,363 @@
# Kubo changelog v0.39
<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>
This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [v0.39.0](#v0390)
## v0.39.0
[<img align="right" width="256px" src="https://github.com/user-attachments/assets/427702e8-b6b8-4ac2-8425-18069626c321" />](https://github.com/user-attachments/assets/427702e8-b6b8-4ac2-8425-18069626c321)
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [🎯 DHT Sweep provider is now the default](#-dht-sweep-provider-is-now-the-default)
- [⚡ Fast root CID providing for immediate content discovery](#-fast-root-cid-providing-for-immediate-content-discovery)
- [⏯️ Provider state persists across restarts](#-provider-state-persists-across-restarts)
- [📊 Detailed statistics with `ipfs provide stat`](#-detailed-statistics-with-ipfs-provide-stat)
- [🔔 Slow reprovide warnings](#-slow-reprovide-warnings)
- [📊 Metric rename: `provider_provides_total`](#-metric-rename-provider_provides_total)
- [🔧 Automatic UPnP recovery after router restarts](#-automatic-upnp-recovery-after-router-restarts)
- [🪦 Deprecated `go-ipfs` name no longer published](#-deprecated-go-ipfs-name-no-longer-published)
- [🚦 Gateway range request limits for CDN compatibility](#-gateway-range-request-limits-for-cdn-compatibility)
- [🖥️ RISC-V support with prebuilt binaries](#-risc-v-support-with-prebuilt-binaries)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
### Overview
Kubo 0.39 makes self-hosting practical on consumer hardware and home networks. The DHT sweep provider (now default) announces your content to the network without traffic spikes that overwhelm residential connections. Automatic UPnP recovery means your node stays reachable after router restarts without manual intervention.
New content becomes findable immediately after `ipfs add`. The provider system persists state across restarts, alerts you when falling behind, and exposes detailed stats for monitoring. This release also finalizes the deprecation of the legacy `go-ipfs` name.
### 🔦 Highlights
#### 🎯 DHT Sweep provider is now the default
The Amino DHT Sweep provider system, introduced as experimental in v0.38, is now enabled by default (`Provide.DHT.SweepEnabled=true`).
**What this means:** All nodes now benefit from efficient keyspace-sweeping content announcements that reduce memory overhead and create predictable network patterns, especially for nodes providing large content collections.
**Migration:** The transition is automatic on upgrade. Your existing configuration is preserved:
- If you explicitly set `Provide.DHT.SweepEnabled=false` in v0.38, you'll continue using the legacy provider
- If you were using the default settings, you'll automatically get the sweep provider
- To opt out and return to legacy behavior: `ipfs config --json Provide.DHT.SweepEnabled false`
- Providers with medium to large datasets may need to adjust defaults; see [Capacity Planning](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md#capacity-planning)
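For quick reference, a minimal sketch of the opt-out toggle from the list above using standard `ipfs config` commands (changes take effect after a daemon restart):

```bash
# Opt out of the sweep provider and return to the legacy provider
ipfs config --json Provide.DHT.SweepEnabled false

# Revert to the new default (sweep provider) later
ipfs config --json Provide.DHT.SweepEnabled true
```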
**New features available with sweep mode:**
- Detailed statistics via `ipfs provide stat` ([see below](#-detailed-statistics-with-ipfs-provide-stat))
- Automatic resume after restarts with persistent state ([see below](#-provider-state-persists-across-restarts))
- Proactive alerts when reproviding falls behind ([see below](#-slow-reprovide-warnings))
- Better metrics for monitoring (`provider_provides_total`) ([see below](#-metric-rename-provider_provides_total))
- Fast optimistic provide of new root CIDs ([see below](#-fast-root-cid-providing-for-immediate-content-discovery))
For background on the sweep provider design and motivations, see [`Provide.DHT.SweepEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled) and Shipyard's blogpost [Provide Sweep: Solving the DHT Provide Bottleneck](https://ipshipyard.com/blog/2025-dht-provide-sweep/).
#### ⚡ Fast root CID providing for immediate content discovery
When you add content to IPFS, the sweep provider queues it for efficient DHT provides over time. While this is resource-efficient, other peers won't find your content immediately after `ipfs add` or `ipfs dag import` completes.
To make sharing faster, `ipfs add` and `ipfs dag import` now do an immediate provide of root CIDs to the DHT in addition to the regular queue (controlled by the new `--fast-provide-root` flag, enabled by default). This complements the sweep provider system: fast-provide handles the urgent case (root CIDs that users share and reference), while the sweep provider efficiently provides all blocks according to `Provide.Strategy` over time.
This closes the gap between command completion and content shareability: root CIDs typically become discoverable on the network in under a second (compared to 30+ seconds previously). The feature uses optimistic DHT operations, which are significantly faster with the sweep provider (now enabled by default).
By default, this immediate provide runs in the background without blocking the command. For use cases requiring guaranteed discoverability before the command returns (e.g., sharing a link immediately), use `--fast-provide-wait` to block until the provide completes.
**Simple examples:**
```bash
ipfs add file.txt # Root provided immediately, blocks queued for sweep provider
ipfs add file.txt --fast-provide-wait # Wait for root provide to complete
ipfs dag import file.car # Same for CAR imports
```
**Configuration:** Set defaults via `Import.FastProvideRoot` (default: `true`) and `Import.FastProvideWait` (default: `false`). See `ipfs add --help` and `ipfs dag import --help` for more details and examples.
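For example, assuming you prefer `ipfs add` and `ipfs dag import` to block until the root provide completes, the flags' config counterparts can be set once (a sketch; a daemon restart applies the change):

```bash
# Keep immediate root providing enabled (this is already the default)
ipfs config --json Import.FastProvideRoot true

# Make 'ipfs add' and 'ipfs dag import' wait for the root provide by default
ipfs config --json Import.FastProvideWait true
```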
This optimization works best with the sweep provider and accelerated DHT client, where provide operations are significantly faster. Automatically skipped when DHT is unavailable (e.g., `Routing.Type=none` or delegated-only configurations).
#### ⏯️ Provider state persists across restarts
The Sweep provider now persists the reprovide cycle state and automatically resumes where it left off after a restart. This brings several improvements:
- **Persistent progress**: The provider saves its position in the reprovide cycle to the datastore. On restart, it continues from where it stopped instead of starting from scratch.
- **Catch-up reproviding**: If the node was offline for an extended period, all CIDs that haven't been reprovided within the configured reprovide interval are immediately queued for reproviding when the node starts up. This ensures content availability is maintained even after downtime.
- **Persistent provide queue**: The provide queue is persisted to the datastore on shutdown. When the node restarts, queued CIDs are restored and provided as expected, preventing loss of pending provide operations.
- **Resume control**: The resume behavior is controlled via [`Provide.DHT.ResumeEnabled`](https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtresumeenabled) (default: `true`). Set to `false` if you don't want to keep the persisted provider state from a previous run.
This feature improves reliability for nodes that experience intermittent connectivity or restarts.
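For example, to discard the persisted provider state from a previous run, the resume behavior described above can be switched off (a sketch; takes effect on the next daemon start):

```bash
# Start each reprovide cycle from scratch instead of resuming (default is true)
ipfs config --json Provide.DHT.ResumeEnabled false
```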
#### 📊 Detailed statistics with `ipfs provide stat`
The Sweep provider system now exposes detailed statistics through `ipfs provide stat`, helping you monitor provider health and troubleshoot issues.
Run `ipfs provide stat` for a quick summary, or use `--all` to see complete metrics including connectivity status, queue sizes, reprovide schedules, network statistics, operation rates, and worker utilization. For real-time monitoring, use `watch ipfs provide stat --all --compact` to observe changes in a 2-column layout. Individual sections can be displayed with flags like `--network`, `--operations`, or `--workers`.
For Dual DHT configurations, use `--lan` to view LAN DHT statistics instead of the default WAN DHT stats.
For more information, run `ipfs provide stat --help` or see the [Provide Stats documentation](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md), including [Capacity Planning](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md#capacity-planning).
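The commands mentioned above, collected in one place for convenience:

```bash
ipfs provide stat                        # quick summary
ipfs provide stat --all                  # complete metrics, all sections
ipfs provide stat --workers              # only the worker utilization section
ipfs provide stat --lan                  # LAN DHT stats (Dual DHT setups)
watch ipfs provide stat --all --compact  # real-time monitoring in 2-column layout
```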
> [!NOTE]
> Legacy provider (when `Provide.DHT.SweepEnabled=false`) shows basic statistics without flag support.
#### 🔔 Slow reprovide warnings
Kubo now monitors DHT reprovide operations when `Provide.DHT.SweepEnabled=true`
and alerts you if your node is falling behind on reprovides.
When the reprovide queue consistently grows and all periodic workers are busy,
a warning is displayed with:
- Queue size and worker utilization details
- Recommended solutions: increase `Provide.DHT.MaxWorkers` or `Provide.DHT.DedicatedPeriodicWorkers`
- Command to monitor real-time progress: `watch ipfs provide stat --all --compact`
The alert polls every 15 minutes (to avoid alert fatigue while catching
persistent issues) and only triggers after sustained growth across multiple
intervals. The legacy provider is unaffected by this change.
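A sketch of applying the suggested tuning; the worker counts below are hypothetical, so pick values based on `ipfs provide stat --workers` output and restart the daemon afterwards:

```bash
# Hypothetical values - adjust to your node's capacity
ipfs config --json Provide.DHT.MaxWorkers 64
ipfs config --json Provide.DHT.DedicatedPeriodicWorkers 8
```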
#### 📊 Metric rename: `provider_provides_total`
The Amino DHT Sweep provider metric has been renamed from `total_provide_count_total` to `provider_provides_total` to follow OpenTelemetry naming conventions and maintain consistency with other kad-dht metrics (which use dot notation like `rpc.inbound.messages`, `rpc.outbound.requests`, etc.).
**Migration:** If you have Prometheus queries, dashboards, or alerts monitoring the old `total_provide_count_total` metric, update them to use `provider_provides_total` instead. This affects all nodes using sweep mode, which is now the default in v0.39 (previously opt-in experimental in v0.38).
#### 🔧 Automatic UPnP recovery after router restarts
Kubo now automatically recovers UPnP port mappings when routers restart or
become temporarily unavailable, fixing a critical connectivity issue that
affected self-hosted nodes behind NAT.
**Previous behavior:** When a UPnP-enabled router restarted, Kubo would lose
its port mapping and fail to re-establish it automatically. Nodes would become
unreachable to the network until the daemon was manually restarted, forcing
reliance on relay connections which degraded performance.
**New behavior:** The upgraded go-libp2p (v0.44.0) includes [Shipyard's fix](https://github.com/libp2p/go-libp2p/pull/3367)
for self-healing NAT mappings that automatically rediscover and re-establish
port forwarding after router events. Nodes now maintain public connectivity
without manual intervention.
> [!NOTE]
> If your node runs behind a router and you haven't manually configured port
> forwarding, make sure [`Swarm.DisableNatPortMap=false`](https://github.com/ipfs/kubo/blob/master/docs/config.md#swarmdisablenatportmap)
> so UPnP can automatically handle port mapping (this is the default).
This significantly improves reliability for desktop and self-hosted IPFS nodes
using UPnP for NAT traversal.
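To double-check the setting mentioned in the note above, a minimal sketch with `ipfs config`:

```bash
# Print the current value; UPnP port mapping is active when this is false (the default)
ipfs config Swarm.DisableNatPortMap

# Re-enable UPnP port mapping if it was previously disabled
ipfs config --json Swarm.DisableNatPortMap false
```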
#### 🪦 Deprecated `go-ipfs` name no longer published
The `go-ipfs` name was deprecated in 2022 and renamed to `kubo`. Starting with this release, the legacy Docker image name has been replaced with a stub that displays an error message directing users to switch to `ipfs/kubo`.
**Docker images:** The `ipfs/go-ipfs` image tags now contain only a stub script that exits with an error, instructing users to update their Docker configurations to use [`ipfs/kubo`](https://hub.docker.com/r/ipfs/kubo) instead. This ensures users are aware of the deprecation while allowing existing automation to fail explicitly rather than silently using outdated images.
**Distribution binaries:** Download Kubo from <https://dist.ipfs.tech/kubo/> or <https://github.com/ipfs/kubo/releases>. The legacy `go-ipfs` distribution path should no longer be used.
All users should migrate to the `kubo` name in their scripts and configurations.
#### 🚦 Gateway range request limits for CDN compatibility
The new [`Gateway.MaxRangeRequestFileSize`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) configuration protects against CDN range request limitations that cause bandwidth overcharges on deserialized responses. Some CDNs convert range requests over large files into full file downloads, causing clients requesting small byte ranges to unknowingly download entire multi-gigabyte files.
This only impacts deserialized responses. Clients using verifiable block requests (`application/vnd.ipld.raw`) are not affected. See the [configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrangerequestfilesize) for details.
#### 🖥️ RISC-V support with prebuilt binaries
Kubo provides official `linux-riscv64` prebuilt binaries, bringing IPFS to [RISC-V](https://en.wikipedia.org/wiki/RISC-V) open hardware.
As RISC-V single-board computers and embedded systems become more accessible, the distributed web is now supported on open hardware architectures - a natural pairing of open technologies.
Download from <https://dist.ipfs.tech/kubo/> or <https://github.com/ipfs/kubo/releases> and look for the `linux-riscv64` archive.
### 📦️ Important dependency updates
- update `go-libp2p` to [v0.45.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.45.0) (incl. [v0.44.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.44.0)) with self-healing UPnP port mappings and go-log/slog interop fixes
- update `quic-go` to [v0.55.0](https://github.com/quic-go/quic-go/releases/tag/v0.55.0)
- update `go-log` to [v2.9.0](https://github.com/ipfs/go-log/releases/tag/v2.9.0) with slog integration for go-libp2p
- update `go-ds-pebble` to [v0.5.7](https://github.com/ipfs/go-ds-pebble/releases/tag/v0.5.7) (includes pebble [v2.1.2](https://github.com/cockroachdb/pebble/releases/tag/v2.1.2))
- update `boxo` to [v0.35.2](https://github.com/ipfs/boxo/releases/tag/v0.35.2) (includes boxo [v0.35.1](https://github.com/ipfs/boxo/releases/tag/v0.35.1))
- update `ipfs-webui` to [v4.10.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.10.0)
- update `go-libp2p-kad-dht` to [v0.36.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.36.0)
### 📝 Changelog
<details><summary>Full Changelog</summary>
- github.com/ipfs/kubo:
- docs: mkreleaselog for 0.39
- chore: version 0.39.0
- bin/mkreleaselog: add github handle resolution and deduplication
- docs: restructure v0.39 changelog for clarity
- upgrade go-libp2p-kad-dht to v0.36.0 (#11079) ([ipfs/kubo#11079](https://github.com/ipfs/kubo/pull/11079))
- fix(docker): include symlinks in scanning for init scripts (#11077) ([ipfs/kubo#11077](https://github.com/ipfs/kubo/pull/11077))
- Update deprecation message for Reprovider fields (#11072) ([ipfs/kubo#11072](https://github.com/ipfs/kubo/pull/11072))
- chore: release v0.39.0-rc1
- test: add regression tests for config secrets protection (#11061) ([ipfs/kubo#11061](https://github.com/ipfs/kubo/pull/11061))
- test: add regression tests for API.Authorizations (#11060) ([ipfs/kubo#11060](https://github.com/ipfs/kubo/pull/11060))
- test: verifyWorkerRun and helptext (#11063) ([ipfs/kubo#11063](https://github.com/ipfs/kubo/pull/11063))
- test(cmdutils): add tests for PathOrCidPath and ValidatePinName (#11062) ([ipfs/kubo#11062](https://github.com/ipfs/kubo/pull/11062))
- fix: return original error in PathOrCidPath fallback (#11059) ([ipfs/kubo#11059](https://github.com/ipfs/kubo/pull/11059))
- feat: fast provide support in `dag import` (#11058) ([ipfs/kubo#11058](https://github.com/ipfs/kubo/pull/11058))
- feat(cli/rpc/add): fast provide of root CID (#11046) ([ipfs/kubo#11046](https://github.com/ipfs/kubo/pull/11046))
- feat(telemetry): collect high level provide DHT sweep settings (#11056) ([ipfs/kubo#11056](https://github.com/ipfs/kubo/pull/11056))
- feat: enable DHT Provide Sweep by default (#10955) ([ipfs/kubo#10955](https://github.com/ipfs/kubo/pull/10955))
- feat(config): optional Gateway.MaxRangeRequestFileSize (#10997) ([ipfs/kubo#10997](https://github.com/ipfs/kubo/pull/10997))
- docs: clarify provide stats metric types and calculations (#11041) ([ipfs/kubo#11041](https://github.com/ipfs/kubo/pull/11041))
- Upgrade to Boxo v0.35.2 (#11050) ([ipfs/kubo#11050](https://github.com/ipfs/kubo/pull/11050))
- fix(go-log@2.9/go-libp2p@0.45): dynamic log level control and tail (#11039) ([ipfs/kubo#11039](https://github.com/ipfs/kubo/pull/11039))
- chore: update webui to v4.10.0 (#11048) ([ipfs/kubo#11048](https://github.com/ipfs/kubo/pull/11048))
- fix(provider/stats): number format (#11045) ([ipfs/kubo#11045](https://github.com/ipfs/kubo/pull/11045))
- provider: protect libp2p connections (#11028) ([ipfs/kubo#11028](https://github.com/ipfs/kubo/pull/11028))
- Merge release v0.38.2 ([ipfs/kubo#11044](https://github.com/ipfs/kubo/pull/11044))
- Upgrade to Boxo v0.35.1 (#11043) ([ipfs/kubo#11043](https://github.com/ipfs/kubo/pull/11043))
- feat(provider): resume cycle (#11031) ([ipfs/kubo#11031](https://github.com/ipfs/kubo/pull/11031))
- chore: upgrade pebble to v2.1.1 (#11040) ([ipfs/kubo#11040](https://github.com/ipfs/kubo/pull/11040))
- fix(cli): provide stat cosmetics (#11034) ([ipfs/kubo#11034](https://github.com/ipfs/kubo/pull/11034))
- fix: go-libp2p v0.44 with self-healing UPnP port mappings (#11032) ([ipfs/kubo#11032](https://github.com/ipfs/kubo/pull/11032))
- feat(provide): slow reprovide alerts when SweepEnabled (#11021) ([ipfs/kubo#11021](https://github.com/ipfs/kubo/pull/11021))
- feat: trace delegated routing http client (#11017) ([ipfs/kubo#11017](https://github.com/ipfs/kubo/pull/11017))
- feat(provide): detailed `ipfs provide stat` (#11019) ([ipfs/kubo#11019](https://github.com/ipfs/kubo/pull/11019))
- config: increase default Provide.DHT.MaxProvideConnsPerWorker (#11016) ([ipfs/kubo#11016](https://github.com/ipfs/kubo/pull/11016))
- docs: update release checklist based on v0.38.0 learnings (#11007) ([ipfs/kubo#11007](https://github.com/ipfs/kubo/pull/11007))
- chore: merge release v0.38.1 ([ipfs/kubo#11020](https://github.com/ipfs/kubo/pull/11020))
- fix: migrations for Windows (#11010) ([ipfs/kubo#11010](https://github.com/ipfs/kubo/pull/11010))
- Upgrade go-ds-pebble to v0.5.3 (#11011) ([ipfs/kubo#11011](https://github.com/ipfs/kubo/pull/11011))
- Merge release v0.38.0 ([ipfs/kubo#11006](https://github.com/ipfs/kubo/pull/11006))
- feat: add docker stub for deprecated ipfs/go-ipfs name (#10998) ([ipfs/kubo#10998](https://github.com/ipfs/kubo/pull/10998))
- docs: add sweeping provide worker count recommendation (#11001) ([ipfs/kubo#11001](https://github.com/ipfs/kubo/pull/11001))
- chore: bump go-libp2p-kad-dht to v0.35.0 (#11002) ([ipfs/kubo#11002](https://github.com/ipfs/kubo/pull/11002))
- upgrade go-ds-pebble to v0.5.2 (#11000) ([ipfs/kubo#11000](https://github.com/ipfs/kubo/pull/11000))
- Upgrade to Boxo v0.35.0 (#10999) ([ipfs/kubo#10999](https://github.com/ipfs/kubo/pull/10999))
- Non-functional changes (#10996) ([ipfs/kubo#10996](https://github.com/ipfs/kubo/pull/10996))
- chore: update boxo and kad-dht dependencies (#10995) ([ipfs/kubo#10995](https://github.com/ipfs/kubo/pull/10995))
- fix: update webui to v4.9.1 (#10994) ([ipfs/kubo#10994](https://github.com/ipfs/kubo/pull/10994))
- fix: provider merge conflicts (#10989) ([ipfs/kubo#10989](https://github.com/ipfs/kubo/pull/10989))
- fix(mfs): add soft limit for `--flush=false` (#10985) ([ipfs/kubo#10985](https://github.com/ipfs/kubo/pull/10985))
- fix: provide Filestore nodes (#10990) ([ipfs/kubo#10990](https://github.com/ipfs/kubo/pull/10990))
- feat: limit pin names to 255 bytes (#10981) ([ipfs/kubo#10981](https://github.com/ipfs/kubo/pull/10981))
- fix: SweepingProvider slow start (#10980) ([ipfs/kubo#10980](https://github.com/ipfs/kubo/pull/10980))
- chore: start v0.39.0 release cycle
- github.com/gammazero/deque (v1.1.0 -> v1.2.0):
- add slice operation functions (#40) ([gammazero/deque#40](https://github.com/gammazero/deque/pull/40))
- maintain base capacity after IterPop iteration (#44) ([gammazero/deque#44](https://github.com/gammazero/deque/pull/44))
- github.com/ipfs/boxo (v0.35.1 -> v0.35.2):
- Release v0.35.2 ([ipfs/boxo#1068](https://github.com/ipfs/boxo/pull/1068))
- fix(logs): upgrade go-libp2p to v0.45.0 and go-log to v2.9.0 ([ipfs/boxo#1066](https://github.com/ipfs/boxo/pull/1066))
- github.com/ipfs/go-cid (v0.5.0 -> v0.6.0):
- v0.6.0 bump (#178) ([ipfs/go-cid#178](https://github.com/ipfs/go-cid/pull/178))
- github.com/ipfs/go-ds-pebble (v0.5.3 -> v0.5.7):
- new version (#74) ([ipfs/go-ds-pebble#74](https://github.com/ipfs/go-ds-pebble/pull/74))
- do not override logger if logger is provided (#72) ([ipfs/go-ds-pebble#72](https://github.com/ipfs/go-ds-pebble/pull/72))
- new version (#70) ([ipfs/go-ds-pebble#70](https://github.com/ipfs/go-ds-pebble/pull/70))
- new-version (#68) ([ipfs/go-ds-pebble#68](https://github.com/ipfs/go-ds-pebble/pull/68))
- Do not allow batch to be reused after commit (#67) ([ipfs/go-ds-pebble#67](https://github.com/ipfs/go-ds-pebble/pull/67))
- new version (#66) ([ipfs/go-ds-pebble#66](https://github.com/ipfs/go-ds-pebble/pull/66))
- Make pebble write options configurable ([ipfs/go-ds-pebble#63](https://github.com/ipfs/go-ds-pebble/pull/63))
- github.com/ipfs/go-dsqueue (v0.1.0 -> v0.1.1):
- new version (#26) ([ipfs/go-dsqueue#26](https://github.com/ipfs/go-dsqueue/pull/26))
- update deque package and add stress test (#25) ([ipfs/go-dsqueue#25](https://github.com/ipfs/go-dsqueue/pull/25))
- github.com/ipfs/go-log/v2 (v2.8.2 -> v2.9.0):
- chore: release v2.9.0 (#177) ([ipfs/go-log#177](https://github.com/ipfs/go-log/pull/177))
- fix: go-libp2p and slog interop (#176) ([ipfs/go-log#176](https://github.com/ipfs/go-log/pull/176))
- github.com/libp2p/go-libp2p (v0.43.0 -> v0.45.0):
- Release v0.45.0 (#3424) ([libp2p/go-libp2p#3424](https://github.com/libp2p/go-libp2p/pull/3424))
- feat(gologshim): Add SetDefaultHandler (#3418) ([libp2p/go-libp2p#3418](https://github.com/libp2p/go-libp2p/pull/3418))
- Update Drips ownedBy address in FUNDING.json
- fix(websocket): use debug level for http.Server errors
- chore: release v0.44.0
- autonatv2: fix normalization for websocket addrs
- autonatv2: remove dependency on webrtc and webtransport
- quicreuse: update libp2p/go-netroute (#3405) ([libp2p/go-libp2p#3405](https://github.com/libp2p/go-libp2p/pull/3405))
- basichost: don't advertise unreachable addrs. (#3357) ([libp2p/go-libp2p#3357](https://github.com/libp2p/go-libp2p/pull/3357))
- basichost: improve autonatv2 reachability logic (#3356) ([libp2p/go-libp2p#3356](https://github.com/libp2p/go-libp2p/pull/3356))
- basichost: fix lint error
- basichost: move EvtLocalAddrsChanged to addrs_manager (#3355) ([libp2p/go-libp2p#3355](https://github.com/libp2p/go-libp2p/pull/3355))
- chore: gitignore go.work files
- refactor!: move insecure transport outside of core
- refactor: drop go-varint dependency
- refactor!: move canonicallog package outside of core
- fix: assignment to entry in nil map
- docs: Update contribute section with mailing list and irc (#3387) ([libp2p/go-libp2p#3387](https://github.com/libp2p/go-libp2p/pull/3387))
- README: remove Drand from notable users section
- chore: add help comment
- refactor: replace context.WithCancel with t.Context
- feat(network): Add Conn.As
- Skip mdns tests on macOS in CI
- fix: deduplicate NAT port mapping requests
- fix: heal NAT mappings after router restart
- feat: relay: add option for custom filter function
- docs: remove broken link (#3375) ([libp2p/go-libp2p#3375](https://github.com/libp2p/go-libp2p/pull/3375))
- AI tooling must be disclosed for contributions (#3372) ([libp2p/go-libp2p#3372](https://github.com/libp2p/go-libp2p/pull/3372))
- feat: Migrate to log/slog (#3364) ([libp2p/go-libp2p#3364](https://github.com/libp2p/go-libp2p/pull/3364))
- basichost: move observed address manager to basichost (#3332) ([libp2p/go-libp2p#3332](https://github.com/libp2p/go-libp2p/pull/3332))
- chore: support Go 1.24 & 1.25 (#3366) ([libp2p/go-libp2p#3366](https://github.com/libp2p/go-libp2p/pull/3366))
- feat(simlibp2p): Simulated libp2p Networks (#3262) ([libp2p/go-libp2p#3262](https://github.com/libp2p/go-libp2p/pull/3262))
- bandwidthcounter: add Reset and TrimIdle methods to reporter interface (#3343) ([libp2p/go-libp2p#3343](https://github.com/libp2p/go-libp2p/pull/3343))
- network: rename NAT Types (#3331) ([libp2p/go-libp2p#3331](https://github.com/libp2p/go-libp2p/pull/3331))
- refactor(quicreuse): use errors.Join in Close method (#3363) ([libp2p/go-libp2p#3363](https://github.com/libp2p/go-libp2p/pull/3363))
- swarm: move AddCertHashes to swarm (#3330) ([libp2p/go-libp2p#3330](https://github.com/libp2p/go-libp2p/pull/3330))
- quicreuse: clean up associations for closed listeners. (#3306) ([libp2p/go-libp2p#3306](https://github.com/libp2p/go-libp2p/pull/3306))
- github.com/libp2p/go-libp2p-kad-dht (v0.35.1 -> v0.36.0):
- new version (#1204) ([libp2p/go-libp2p-kad-dht#1204](https://github.com/libp2p/go-libp2p-kad-dht/pull/1204))
- update dependencies (#1205) ([libp2p/go-libp2p-kad-dht#1205](https://github.com/libp2p/go-libp2p-kad-dht/pull/1205))
- fix(provider): protect `SweepingProvider.wg` (#1200) ([libp2p/go-libp2p-kad-dht#1200](https://github.com/libp2p/go-libp2p-kad-dht/pull/1200))
- fix(ResettableKeystore): race when closing during reset (#1201) ([libp2p/go-libp2p-kad-dht#1201](https://github.com/libp2p/go-libp2p-kad-dht/pull/1201))
- fix(provider): conflict resolution (#1199) ([libp2p/go-libp2p-kad-dht#1199](https://github.com/libp2p/go-libp2p-kad-dht/pull/1199))
- fix(provider): remove from trie by pruning prefix (#1198) ([libp2p/go-libp2p-kad-dht#1198](https://github.com/libp2p/go-libp2p-kad-dht/pull/1198))
- fix(provider): rename metric to follow OpenTelemetry conventions (#1195) ([libp2p/go-libp2p-kad-dht#1195](https://github.com/libp2p/go-libp2p-kad-dht/pull/1195))
- fix(provider): resume cycle from persisted keystore (#1193) ([libp2p/go-libp2p-kad-dht#1193](https://github.com/libp2p/go-libp2p-kad-dht/pull/1193))
- feat(provider): connectivity callbacks (#1194) ([libp2p/go-libp2p-kad-dht#1194](https://github.com/libp2p/go-libp2p-kad-dht/pull/1194))
- feat(provider): trie iterators (#1189) ([libp2p/go-libp2p-kad-dht#1189](https://github.com/libp2p/go-libp2p-kad-dht/pull/1189))
- refactor(provider): optimize memory when allocating keys to peers (#1187) ([libp2p/go-libp2p-kad-dht#1187](https://github.com/libp2p/go-libp2p-kad-dht/pull/1187))
- refactor(keystore): track size (#1181) ([libp2p/go-libp2p-kad-dht#1181](https://github.com/libp2p/go-libp2p-kad-dht/pull/1181))
- Remove go-libp2p-maintainers from codeowners (#1192) ([libp2p/go-libp2p-kad-dht#1192](https://github.com/libp2p/go-libp2p-kad-dht/pull/1192))
- switch to bit256.NewKeyFromArray (#1188) ([libp2p/go-libp2p-kad-dht#1188](https://github.com/libp2p/go-libp2p-kad-dht/pull/1188))
- fix(provider): `RegionsFromPeers` may return multiple regions (#1185) ([libp2p/go-libp2p-kad-dht#1185](https://github.com/libp2p/go-libp2p-kad-dht/pull/1185))
- feat(provider): skip bootstrap reprovide (#1186) ([libp2p/go-libp2p-kad-dht#1186](https://github.com/libp2p/go-libp2p-kad-dht/pull/1186))
- refactor(provider): use adaptive deadline for CycleStats cleanup (#1183) ([libp2p/go-libp2p-kad-dht#1183](https://github.com/libp2p/go-libp2p-kad-dht/pull/1183))
- refactor(provider/stats): use int64 to avoid overflows (#1182) ([libp2p/go-libp2p-kad-dht#1182](https://github.com/libp2p/go-libp2p-kad-dht/pull/1182))
- provider: trigger connectivity check when missing libp2p addresses (#1180) ([libp2p/go-libp2p-kad-dht#1180](https://github.com/libp2p/go-libp2p-kad-dht/pull/1180))
- fix(provider): resume cycle (#1176) ([libp2p/go-libp2p-kad-dht#1176](https://github.com/libp2p/go-libp2p-kad-dht/pull/1176))
- tests: fix flaky TestProvidesExpire (#1179) ([libp2p/go-libp2p-kad-dht#1179](https://github.com/libp2p/go-libp2p-kad-dht/pull/1179))
- tests: fix flaky TestFindPeerWithQueryFilter (#1178) ([libp2p/go-libp2p-kad-dht#1178](https://github.com/libp2p/go-libp2p-kad-dht/pull/1178))
- tests: fix #1175 (#1177) ([libp2p/go-libp2p-kad-dht#1177](https://github.com/libp2p/go-libp2p-kad-dht/pull/1177))
- feat(provider): exit early region exploration if no new peers discovered (#1174) ([libp2p/go-libp2p-kad-dht#1174](https://github.com/libp2p/go-libp2p-kad-dht/pull/1174))
- provider: protect connections (#1172) ([libp2p/go-libp2p-kad-dht#1172](https://github.com/libp2p/go-libp2p-kad-dht/pull/1172))
- feat(provider): resume reprovides (#1170) ([libp2p/go-libp2p-kad-dht#1170](https://github.com/libp2p/go-libp2p-kad-dht/pull/1170))
- fix(provider): custom logger name (#1173) ([libp2p/go-libp2p-kad-dht#1173](https://github.com/libp2p/go-libp2p-kad-dht/pull/1173))
- feat(provider): persist provide queue (#1167) ([libp2p/go-libp2p-kad-dht#1167](https://github.com/libp2p/go-libp2p-kad-dht/pull/1167))
- provider: stats (#1144) ([libp2p/go-libp2p-kad-dht#1144](https://github.com/libp2p/go-libp2p-kad-dht/pull/1144))
- github.com/probe-lab/go-libdht (v0.3.0 -> v0.4.0):
- chore: release v0.4.0 (#26) ([probe-lab/go-libdht#26](https://github.com/probe-lab/go-libdht/pull/26))
- feat(key/bit256): memory optimized constructor (#25) ([probe-lab/go-libdht#25](https://github.com/probe-lab/go-libdht/pull/25))
- refactor(trie): AddMany memory optimization (#24) ([probe-lab/go-libdht#24](https://github.com/probe-lab/go-libdht/pull/24))
</details>
### 👨‍👩‍👧‍👦 Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| [@guillaumemichel](https://github.com/guillaumemichel) | 41 | +9906/-1383 | 170 |
| [@lidel](https://github.com/lidel) | 30 | +6652/-694 | 97 |
| [@sukunrt](https://github.com/sukunrt) | 9 | +1618/-1524 | 39 |
| [@MarcoPolo](https://github.com/MarcoPolo) | 17 | +1665/-1452 | 160 |
| [@gammazero](https://github.com/gammazero) | 23 | +514/-53 | 29 |
| [@Prabhat1308](https://github.com/Prabhat1308) | 1 | +197/-67 | 4 |
| [@peterargue](https://github.com/peterargue) | 3 | +82/-25 | 5 |
| [@cargoedit](https://github.com/cargoedit) | 1 | +35/-72 | 14 |
| [@hsanjuan](https://github.com/hsanjuan) | 2 | +66/-29 | 5 |
| [@shoriwe](https://github.com/shoriwe) | 1 | +68/-21 | 3 |
| [@dennis-tra](https://github.com/dennis-tra) | 2 | +27/-2 | 2 |
| [@Lil-Duckling-22](https://github.com/Lil-Duckling-22) | 1 | +4/-1 | 1 |
| [@crStiv](https://github.com/crStiv) | 1 | +1/-3 | 1 |
| [@cpeliciari](https://github.com/cpeliciari) | 1 | +3/-0 | 1 |
| [@rvagg](https://github.com/rvagg) | 1 | +1/-1 | 1 |
| [@p-shahi](https://github.com/p-shahi) | 1 | +1/-1 | 1 |
| [@lbarrettanderson](https://github.com/lbarrettanderson) | 1 | +1/-1 | 1 |
| [@filipremb](https://github.com/filipremb) | 1 | +1/-1 | 1 |
| [@marten-seemann](https://github.com/marten-seemann) | 1 | +0/-1 | 1 |

View File

@ -66,6 +66,7 @@ config file at runtime.
- [`Gateway.DisableHTMLErrors`](#gatewaydisablehtmlerrors)
- [`Gateway.ExposeRoutingAPI`](#gatewayexposeroutingapi)
- [`Gateway.RetrievalTimeout`](#gatewayretrievaltimeout)
- [`Gateway.MaxRangeRequestFileSize`](#gatewaymaxrangerequestfilesize)
- [`Gateway.MaxConcurrentRequests`](#gatewaymaxconcurrentrequests)
- [`Gateway.HTTPHeaders`](#gatewayhttpheaders)
- [`Gateway.RootRedirect`](#gatewayrootredirect)
@ -132,6 +133,7 @@ config file at runtime.
- [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers)
- [`Provide.DHT.Interval`](#providedhtinterval)
- [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled)
- [`Provide.DHT.ResumeEnabled`](#providedhtresumeenabled)
- [`Provide.DHT.DedicatedPeriodicWorkers`](#providedhtdedicatedperiodicworkers)
- [`Provide.DHT.DedicatedBurstWorkers`](#providedhtdedicatedburstworkers)
- [`Provide.DHT.MaxProvideConnsPerWorker`](#providedhtmaxprovideconnsperworker)
@ -228,6 +230,8 @@ config file at runtime.
- [`Import.UnixFSRawLeaves`](#importunixfsrawleaves)
- [`Import.UnixFSChunker`](#importunixfschunker)
- [`Import.HashFunction`](#importhashfunction)
- [`Import.FastProvideRoot`](#importfastprovideroot)
- [`Import.FastProvideWait`](#importfastprovidewait)
- [`Import.BatchMaxNodes`](#importbatchmaxnodes)
- [`Import.BatchMaxSize`](#importbatchmaxsize)
- [`Import.UnixFSFileMaxLinks`](#importunixfsfilemaxlinks)
@ -1158,6 +1162,27 @@ Default: `30s`
Type: `optionalDuration`
### `Gateway.MaxRangeRequestFileSize`
Maximum file size for HTTP range requests on deserialized responses. Range requests for files larger than this limit return 501 Not Implemented.
**Why this exists:**
Some CDNs like Cloudflare intercept HTTP range requests and convert them to full file downloads when files exceed their cache bucket limits. Cloudflare's default plan only caches range requests for files up to 5GiB. Files larger than this receive HTTP 200 with the entire file instead of HTTP 206 with the requested byte range. A client requesting 1MB from a 40GiB file would unknowingly download all 40GiB, causing bandwidth overcharges for the gateway operator, unexpected data costs for the client, and potential browser crashes.
This only affects deserialized responses. Clients fetching verifiable blocks as `application/vnd.ipld.raw` are not impacted because they work with small chunks that stay well below CDN cache limits.
**How to use:**
Set this to your CDN's range request cache limit (e.g., `"5GiB"` for Cloudflare's default plan). The gateway returns 501 Not Implemented for range requests over files larger than this limit, with an error message suggesting verifiable block requests as an alternative.
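For example, a gateway behind Cloudflare's default plan could set the limit with the `ipfs config` CLI. This is a minimal sketch: it uses the human-readable string form that `optionalBytes` accepts, and the daemon must be restarted for the change to apply.
```sh
# Cap deserialized range requests at Cloudflare's default 5GiB cache limit
ipfs config Gateway.MaxRangeRequestFileSize 5GiB
# Verify the stored value
ipfs config Gateway.MaxRangeRequestFileSize
```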
> [!NOTE]
> Cloudflare users running open gateway hosting deserialized responses should deploy additional protection via Cloudflare Snippets (requires Enterprise plan). The Kubo configuration alone is not sufficient because Cloudflare has already intercepted and cached the response by the time it reaches your origin. See [boxo#856](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976) for a snippet that aborts HTTP 200 responses when Content-Length exceeds the limit.
Default: `0` (no limit)
Type: [`optionalBytes`](#optionalbytes)
### `Gateway.MaxConcurrentRequests`
Limits concurrent HTTP requests. Requests beyond the limit receive 429 Too Many Requests.
@ -1910,10 +1935,17 @@ Type: `duration`
## `Provide`
Configures CID announcements to the routing system, including both immediate
announcements for new content (provide) and periodic re-announcements
(reprovide) on systems that require it, like Amino DHT. While designed to support
multiple routing systems in the future, the current default configuration only supports providing to the Amino DHT.
Configures how your node advertises content to make it discoverable by other
peers.
**What is providing?** When your node stores content, it publishes provider
records to the routing system announcing "I have this content". These records
map CIDs to your peer ID, enabling content discovery across the network.
While designed to support multiple routing systems in the future, the current
default configuration only supports [providing to the Amino DHT](#providedht).
<!-- TODO: See the [Reprovide Sweep blog post](https://github.com/ipshipyard/ipshipyard.com/pull/8) for detailed performance comparisons. -->
### `Provide.Enabled`
@ -1964,13 +1996,39 @@ Type: `optionalString` (unset for the default)
Configuration for providing data to Amino DHT peers.
**Provider record lifecycle:** On the Amino DHT, provider records expire after
[`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43).
Your node must re-announce (reprovide) content periodically to keep it
discoverable. The [`Provide.DHT.Interval`](#providedhtinterval) setting
controls this timing, with the default ensuring records refresh well before
expiration or negative churn effects kick in.
**Two provider systems:**
- **Sweep provider**: Divides the DHT keyspace into regions and systematically
sweeps through them over the reprovide interval. This batches CIDs allocated
to the same DHT servers, dramatically reducing the number of DHT lookups and
PUTs needed. Spreads work evenly over time with predictable resource usage.
- **Legacy provider**: Processes each CID individually with separate DHT
lookups. Works well for small content collections but struggles to complete
reprovide cycles when managing thousands of CIDs.
#### Monitoring Provide Operations
You can monitor the effectiveness of your provide configuration through metrics exposed at the Prometheus endpoint: `{Addresses.API}/debug/metrics/prometheus` (default: `http://127.0.0.1:5001/debug/metrics/prometheus`).
**Quick command-line monitoring:** Use `ipfs provide stat` to view the current
state of the provider system. For real-time monitoring, run
`watch ipfs provide stat --all --compact` to see detailed statistics refreshed
continuously in a 2-column layout.
Different metrics are available depending on whether you use legacy mode (`SweepEnabled=false`) or sweep mode (`SweepEnabled=true`). See [Provide metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide) for details.
**Long-term monitoring:** For in-depth or long-term monitoring, metrics are
exposed at the Prometheus endpoint: `{Addresses.API}/debug/metrics/prometheus`
(default: `http://127.0.0.1:5001/debug/metrics/prometheus`). Different metrics
are available depending on whether you use legacy mode (`SweepEnabled=false`) or
sweep mode (`SweepEnabled=true`). See [Provide metrics documentation](https://github.com/ipfs/kubo/blob/master/docs/metrics.md#provide)
for details.
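For a quick look without a full Prometheus setup, the endpoint can be queried directly. A sketch assuming the default API address:
```sh
# List provide-related metrics from the local Kubo API
curl -s http://127.0.0.1:5001/debug/metrics/prometheus | grep -i provide
```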
To enable detailed debug logging for both providers, set:
**Debug logging:** For troubleshooting, enable detailed logging by setting:
```sh
GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug
@ -1982,12 +2040,24 @@ GOLOG_LOG_LEVEL=error,provider=debug,dht/provider=debug
#### `Provide.DHT.Interval`
Sets how often to re-announce content to the DHT. Provider records on Amino DHT
expire after [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43),
also known as Provider Record Expiration Interval.
expire after [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43).
An interval of about half the expiration window ensures provider records
are refreshed well before they expire. This keeps your content continuously
discoverable accounting for network churn without overwhelming the network with too frequent announcements.
**Why this matters:** The interval must be shorter than the expiration window to
ensure provider records refresh before they expire. The default value is
approximately half of [`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43),
which accounts for network churn and ensures records stay alive without
overwhelming the network with unnecessary announcements.
**With sweep mode enabled
([`Provide.DHT.SweepEnabled`](#providedhtsweepenabled)):** The system spreads
reprovide operations smoothly across this entire interval. Each keyspace region
is reprovided at scheduled times throughout the period, ensuring each region's
announcements complete before records expire.
**With legacy mode:** The system attempts to reprovide all CIDs as quickly as
possible at the start of each interval. If reproviding takes longer than this
interval (common with large datasets), the next cycle is skipped and provider
records may expire.
- If unset, it uses the implicit safe default.
- If set to the value `"0"` it will disable content reproviding to DHT.
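A minimal sketch of adjusting this setting with the `ipfs config` CLI (changes take effect after a daemon restart):
```sh
ipfs config Provide.DHT.Interval 10h    # reprovide more frequently than the implicit default
ipfs config Provide.DHT.Interval "0"    # disable content reproviding to the DHT
```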
@ -2048,6 +2118,7 @@ connections this setting can generate.
> users. The system will only use workers as needed - unused resources won't be
> consumed. Ensure you adjust the swarm [connection manager](#swarmconnmgr) and
> [resource manager](#swarmresourcemgr) configuration accordingly.
> See [Capacity Planning](https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md#capacity-planning) for more details.
Default: `16`
@ -2055,32 +2126,55 @@ Type: `optionalInteger` (non-negative; `0` means unlimited number of workers)
#### `Provide.DHT.SweepEnabled`
Whether Provide Sweep is enabled. If not enabled, the legacy
[`boxo/provider`](https://github.com/ipfs/boxo/tree/main/provider) is used for
both provides and reprovides.
Enables the sweep provider for efficient content announcements. When disabled,
the legacy [`boxo/provider`](https://github.com/ipfs/boxo/tree/main/provider) is
used instead.
Provide Sweep is a resource efficient technique for advertising content to
the Amino DHT swarm. The Provide Sweep module tracks the keys that should be periodically reprovided in
the `Keystore`. It splits the keys into DHT keyspace regions by proximity (XOR
distance), and schedules when reprovides should happen in order to spread the
reprovide operation over time to avoid a spike in resource utilization. It
basically sweeps the keyspace _from left to right_ over the
[`Provide.DHT.Interval`](#providedhtinterval) time period, and reprovides keys
matching to the visited keyspace region.
**The legacy provider problem:** The legacy system processes CIDs one at a
time, requiring a separate DHT lookup (10-20 seconds each) to find the 20
closest peers for each CID. This sequential approach typically handles fewer
than 10,000 CIDs over 22h ([`Provide.DHT.Interval`](#providedhtinterval)). If
your node has more CIDs than can be reprovided within
[`Provide.DHT.Interval`](#providedhtinterval), provider records start expiring
after
[`amino.DefaultProvideValidity`](https://github.com/libp2p/go-libp2p-kad-dht/blob/v0.34.0/amino/defaults.go#L40-L43),
making content undiscoverable.
Provide Sweep aims at replacing the inefficient legacy `boxo/provider`
module, and is currently opt-in. You can compare the effectiveness of sweep mode vs legacy mode by monitoring the appropriate metrics (see [Monitoring Provide Operations](#monitoring-provide-operations) above).
**How sweep mode works:** The sweep provider divides the DHT keyspace into
regions based on keyspace prefixes. It estimates the Amino DHT size, calculates
how many regions are needed (sized to contain at least 20 peers each), then
schedules region processing evenly across
[`Provide.DHT.Interval`](#providedhtinterval). When processing a region, it
discovers the peers in that region once, then sends all provider records for
CIDs allocated to those peers in a batch. This batching is the key efficiency:
instead of N lookups for N CIDs, the number of lookups is bounded by a constant
fraction of the Amino DHT size (e.g., ~3,000 lookups when there are ~10,000 DHT
servers), regardless of how many CIDs you're providing.
Whenever new keys should be advertised to the Amino DHT, `kubo` calls
`StartProviding()`, triggering an initial `provide` operation for the given
keys. The keys will be added to the `Keystore` tracking which keys should be
reprovided and when they should be reprovided. Calling `StopProviding()`
removes the keys from the `Keystore`. However, it is currently tricky for
`kubo` to detect when a key should stop being advertised. Hence, `kubo` will
periodically refresh the `Keystore` at each [`Provide.DHT.Interval`](#providedhtinterval)
by providing it a channel of all the keys it is expected to contain according
to the [`Provide.Strategy`](#providestrategy). During this operation,
all keys in the `Keystore` are purged, and only the given ones remain scheduled.
**Efficiency gains:** For a node providing 100,000 CIDs, sweep mode reduces
lookups by 97% compared to legacy. The work spreads smoothly over time rather
than completing in bursts, preventing resource spikes and duplicate
announcements. Long-running nodes reprovide systematically just before records
would expire, keeping content continuously discoverable without wasting
bandwidth.
**Implementation details:** The sweep provider tracks CIDs in a persistent
keystore. New content added via `StartProviding()` enters the provide queue and
gets batched by keyspace region. The keystore is periodically refreshed at each
[`Provide.DHT.Interval`](#providedhtinterval) with CIDs matching
[`Provide.Strategy`](#providestrategy) to ensure only current content remains
scheduled. This handles cases where content is unpinned or removed.
**Persistent reprovide cycle state:** When Provide Sweep is enabled, the
reprovide cycle state is persisted to the datastore by default. On restart, Kubo
automatically resumes from where it left off. If the node was offline for an
extended period, all CIDs that haven't been reprovided within the configured
[`Provide.DHT.Interval`](#providedhtinterval) are immediately queued for
reproviding. Additionally, the provide queue is persisted on shutdown and
restored on startup, ensuring no pending provide operations are lost. If you
don't want to keep the persisted provider state from a previous run, you can
disable this behavior by setting [`Provide.DHT.ResumeEnabled`](#providedhtresumeenabled)
to `false`.
> <picture>
> <source media="(prefers-color-scheme: dark)" srcset="https://github.com/user-attachments/assets/f6e06b08-7fee-490c-a681-1bf440e16e27">
@ -2088,25 +2182,59 @@ all keys in the `Keystore` are purged, and only the given ones remain scheduled.
> <img alt="Reprovide Cycle Comparison" src="https://github.com/user-attachments/assets/e1662d7c-f1be-4275-a9ed-f2752fcdcabe">
> </picture>
>
> The diagram above visualizes the performance patterns:
> The diagram compares performance patterns:
>
> - **Legacy mode**: Individual (slow) provides per CID, can struggle with large datasets
> - **Sweep mode**: Even distribution matching the keyspace sweep described with low resource usage
> - **Accelerated DHT**: Hourly traffic spikes with high resource usage
> - **Legacy mode**: Sequential processing, one lookup per CID, struggles with large datasets
> - **Sweep mode**: Smooth distribution over time, batched lookups by keyspace region, predictable resource usage
> - **Accelerated DHT**: Hourly network crawls creating traffic spikes, high resource usage
>
> Sweep mode provides similar effectiveness to Accelerated DHT but with steady resource usage - better for machines with limited CPU, memory, or network bandwidth.
> Sweep mode achieves similar effectiveness to the Accelerated DHT client but with steady resource consumption.
You can compare the effectiveness of sweep mode vs legacy mode by monitoring the appropriate metrics (see [Monitoring Provide Operations](#monitoring-provide-operations) above).
> [!NOTE]
> This feature is opt-in for now, but will become the default in a future release.
> Eventually, this configuration flag will be removed once the feature is stable.
> This is the default provider system as of Kubo v0.39. To use the legacy provider instead, set `Provide.DHT.SweepEnabled=false`.
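If you want to compare the two systems on your own node, a minimal sketch using the `ipfs config` CLI (a daemon restart is required for the switch to apply):
```sh
ipfs config --json Provide.DHT.SweepEnabled false   # opt back into the legacy boxo/provider
ipfs config --json Provide.DHT.SweepEnabled true    # return to the sweep provider (default since v0.39)
watch ipfs provide stat --all --compact             # observe throughput after the switch
```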
Default: `false`
Default: `true`
Type: `flag`
#### `Provide.DHT.ResumeEnabled`
Controls whether the provider resumes from its previous state on restart. Only
applies when `Provide.DHT.SweepEnabled` is true.
When enabled (the default), the provider persists its reprovide cycle state and
provide queue to the datastore, and restores them on restart. This ensures:
- The reprovide cycle continues from where it left off instead of starting over
- Any CIDs in the provide queue during shutdown are restored and provided after
restart
- CIDs that missed their reprovide window while the node was offline are queued
for immediate reproviding
When disabled, the provider starts fresh on each restart, discarding any
previous reprovide cycle state and provide queue. On a fresh start, all CIDs
matching the [`Provide.Strategy`](#providestrategy) will be provided ASAP (as
burst provides), and then keyspace regions are reprovided according to the
regular schedule starting from the beginning of the reprovide cycle.
> [!NOTE]
> Disabling this option means the provider will provide all content matching
> your strategy on every restart (which can be resource-intensive for large
> datasets), then start from the beginning of the reprovide cycle. For nodes
> with large datasets or frequent restarts, keeping this enabled (the default)
> is recommended for better resource efficiency and more consistent reproviding
> behavior.
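If you explicitly want a clean slate on the next restart, a minimal sketch:
```sh
# Discard persisted reprovide cycle state and provide queue on the next daemon restart
ipfs config --json Provide.DHT.ResumeEnabled false
```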
Default: `true`
Type: `flag`
#### `Provide.DHT.DedicatedPeriodicWorkers`
Number of workers dedicated to periodic keyspace region reprovides. Only applies when `Provide.DHT.SweepEnabled` is true.
Number of workers dedicated to periodic keyspace region reprovides. Only
applies when `Provide.DHT.SweepEnabled` is true.
Among the [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers), this
number of workers will be dedicated to the periodic region reprovide only. The sum of
@ -2167,7 +2295,13 @@ from that keyspace region until all provider records are assigned.
This option defines how many such connections can be open concurrently by a
single worker.
Default: `16`
> [!NOTE]
> Increasing this value can speed up the provide operation, at the cost of
> opening more simultaneous connections to DHT servers. A keyspace region
> typically has fewer than 60 peers, so you may hit a performance ceiling beyond
> which increasing this value has no effect.
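For example, a node with spare bandwidth and generous resource manager limits could raise the ceiling. A sketch; restart the daemon to apply:
```sh
ipfs config --json Provide.DHT.MaxProvideConnsPerWorker 32
```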
Default: `20`
Type: `optionalInteger` (non-negative)
@ -3035,7 +3169,7 @@ It is possible to inspect the runtime limits via `ipfs swarm resources --help`.
> To set memory limit for the entire Kubo process, use [`GOMEMLIMIT` environment variable](http://web.archive.org/web/20240222201412/https://kupczynski.info/posts/go-container-aware/) which all Go programs recognize, and then set `Swarm.ResourceMgr.MaxMemory` to less than your custom `GOMEMLIMIT`.
Default: `[TOTAL_SYSTEM_MEMORY]/2`
Type: `optionalBytes`
Type: [`optionalBytes`](#optionalbytes)
#### `Swarm.ResourceMgr.MaxFileDescriptors`
@ -3488,6 +3622,38 @@ Default: `sha2-256`
Type: `optionalString`
### `Import.FastProvideRoot`
Immediately provide root CIDs to the DHT in addition to the regular provide queue.
This complements the sweep provider system: fast-provide handles the urgent case (root CIDs that users share and reference), while the sweep provider efficiently provides all blocks according to the `Provide.Strategy` over time. Together, they optimize for both immediate discoverability of newly imported content and efficient resource usage for complete DAG provides.
When disabled, only the sweep provider's queue is used.
This setting applies to both `ipfs add` and `ipfs dag import` commands and can be overridden per-command with the `--fast-provide-root` flag.
Ignored when DHT is not available for routing (e.g., `Routing.Type=none` or delegated-only configurations).
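A minimal sketch of both forms, per-command and global (the directory name is illustrative):
```sh
ipfs add -r --fast-provide-root=false ./large-dataset   # skip the fast root provide for one import
ipfs config --json Import.FastProvideRoot false         # disable it globally
```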
Default: `true`
Type: `flag`
### `Import.FastProvideWait`
Wait for the immediate root CID provide to complete before returning.
When enabled, the command blocks until the provide completes, ensuring guaranteed discoverability before returning. When disabled (default), the provide happens asynchronously in the background without blocking the command.
Use this when you need certainty that content is discoverable before the command returns (e.g., sharing a link immediately after adding).
This setting applies to both `ipfs add` and `ipfs dag import` commands and can be overridden per-command with the `--fast-provide-wait` flag.
Ignored when DHT is not available for routing (e.g., `Routing.Type=none` or delegated-only configurations).
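For example, when a link will be shared the moment the command returns (a sketch; the file name is illustrative):
```sh
ipfs add --fast-provide-wait important-file.bin
```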
Default: `false`
Type: `flag`
### `Import.BatchMaxNodes`
The maximum number of nodes in a write-batch. The total size of the batch is limited by `BatchMaxNodes` and `BatchMaxSize`.
@ -3588,7 +3754,7 @@ Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/uni
Default: `256KiB` (may change, inspect `DefaultUnixFSHAMTDirectorySizeThreshold` to confirm)
Type: `optionalBytes`
Type: [`optionalBytes`](#optionalbytes)
## `Version`
@ -3905,6 +4071,7 @@ an implicit default when missing from the config file:
- a string value indicating the number of bytes, including human readable representations:
- [SI sizes](https://en.wikipedia.org/wiki/Metric_prefix#List_of_SI_prefixes) (metric units, powers of 1000), e.g. `1B`, `2kB`, `3MB`, `4GB`, `5TB`, …)
- [IEC sizes](https://en.wikipedia.org/wiki/Binary_prefix#IEC_prefixes) (binary units, powers of 1024), e.g. `1B`, `2KiB`, `3MiB`, `4GiB`, `5TiB`, …)
- a raw number (will be interpreted as bytes, e.g. `1048576` for 1MiB)
### `optionalString`

View File

@ -7,9 +7,9 @@ go 1.25
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/boxo v0.35.1
github.com/ipfs/boxo v0.35.2
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.43.0
github.com/libp2p/go-libp2p v0.45.0
github.com/multiformats/go-multiaddr v0.16.1
)
@ -34,7 +34,7 @@ require (
github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble/v2 v2.1.0 // indirect
github.com/cockroachdb/pebble/v2 v2.1.2 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
@ -53,7 +53,7 @@ require (
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/gammazero/deque v1.2.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect
@ -75,15 +75,15 @@ require (
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-block-format v0.2.3 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-cid v0.6.0 // indirect
github.com/ipfs/go-cidutil v0.1.0 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-ds-badger v0.3.4 // indirect
github.com/ipfs/go-ds-flatfs v0.5.5 // indirect
github.com/ipfs/go-ds-leveldb v0.5.2 // indirect
github.com/ipfs/go-ds-measure v0.2.2 // indirect
github.com/ipfs/go-ds-pebble v0.5.3 // indirect
github.com/ipfs/go-dsqueue v0.1.0 // indirect
github.com/ipfs/go-ds-pebble v0.5.7 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-fs-lock v0.1.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
@ -93,7 +93,7 @@ require (
github.com/ipfs/go-ipld-format v0.6.3 // indirect
github.com/ipfs/go-ipld-git v0.1.1 // indirect
github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
github.com/ipfs/go-log/v2 v2.8.2 // indirect
github.com/ipfs/go-log/v2 v2.9.0 // indirect
github.com/ipfs/go-metrics-interface v0.3.0 // indirect
github.com/ipfs/go-peertaskqueue v0.8.2 // indirect
github.com/ipfs/go-test v0.2.3 // indirect
@ -115,7 +115,7 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.36.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
@ -171,7 +171,7 @@ require (
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/probe-lab/go-libdht v0.3.0 // indirect
github.com/probe-lab/go-libdht v0.4.0 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
@ -212,14 +212,14 @@ require (
go.uber.org/zap/exp v0.3.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect

View File

@ -95,8 +95,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.0 h1:6KZvjSpWcEXZUvlLzTRC7T1A2G7r+bFskIzggklxixo=
github.com/cockroachdb/pebble/v2 v2.1.0/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
@ -165,8 +165,8 @@ github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIp
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU=
github.com/gammazero/deque v1.2.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
@ -291,8 +291,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.1 h1:MGL3aaaxnu/h9KKq+X/6FxapI/qlDmnRNk33U7tz/fQ=
github.com/ipfs/boxo v0.35.1/go.mod h1:/p1XZVp+Yzv78RuKjb3BESBYEQglRgDrWvmN5mFrsus=
github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8=
github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
@ -301,8 +301,8 @@ github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xg
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30=
github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ=
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
@ -321,10 +321,10 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI=
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.3 h1:4esRt82+LkenUnIWyUCghR1gzRfqeCYGGKX/hRmabro=
github.com/ipfs/go-ds-pebble v0.5.3/go.mod h1:pn2bxYkAE7JRkbAF7D8xuEEFD3oOQ7QqQZPWkAVBs58=
github.com/ipfs/go-dsqueue v0.1.0 h1:OrahKDtT/Q+iMgKaM9XWdxrYPVASFpTuLah8QpKjboc=
github.com/ipfs/go-dsqueue v0.1.0/go.mod h1:iLNkodSOSKTLn0gCvL9ikArz5rZfNh8F9/BRvHe7RbY=
github.com/ipfs/go-ds-pebble v0.5.7 h1:4PQI46y3fjjxUTgHwYqcOVyoxiU6v1sqN6ONeRXGQTM=
github.com/ipfs/go-ds-pebble v0.5.7/go.mod h1:rsIgXE2qN+VfHKBin2cOOGFTZ/Agor6i8wBWA6ihbr0=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
@ -349,8 +349,8 @@ github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYD
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log/v2 v2.8.2 h1:nVG4nNHUwwI/sTs9Bi5iE8sXFQwXs3AjkkuWhg7+Y2I=
github.com/ipfs/go-log/v2 v2.8.2/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk=
github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU=
github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY=
github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU=
@ -424,14 +424,14 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p v0.45.0 h1:Pdhr2HsFXaYjtfiNcBP4CcRUONvbMFdH3puM9vV4Tiw=
github.com/libp2p/go-libp2p v0.45.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
github.com/libp2p/go-libp2p-kad-dht v0.35.1 h1:RQglhc9OxqDwlFFdhQMwKxIPBIBfGsleROnK5hqVsoE=
github.com/libp2p/go-libp2p-kad-dht v0.35.1/go.mod h1:1oCXzkkBiYh3d5cMWLpInSOZ6am2AlpC4G+GDcZFcE0=
github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8=
github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
@ -465,6 +465,8 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -628,8 +630,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/probe-lab/go-libdht v0.3.0 h1:Q3ZXK8wCjZvgeHSTtRrppXobXY/KHPLZJfc+cdTTyqA=
github.com/probe-lab/go-libdht v0.3.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo=
github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
@ -840,8 +842,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -850,8 +852,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -911,8 +913,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -931,8 +933,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -979,8 +981,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -1002,8 +1004,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@ -59,7 +59,7 @@ Metrics for the legacy provider system when `Provide.DHT.SweepEnabled=false`:
Metrics for the DHT provider system when `Provide.DHT.SweepEnabled=true`:
- `total_provide_count_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)
- `provider_provides_total` - Counter: total successful provide operations since node startup (includes both one-time provides and periodic provides done on `Provide.DHT.Interval`)
> [!NOTE]
> These metrics are exposed by [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/). You can enable debug logging for DHT provider activity with `GOLOG_LOG_LEVEL=dht/provider=debug`.

285
docs/provide-stats.md Normal file
View File

@ -0,0 +1,285 @@
# Provide Stats
The `ipfs provide stat` command reports statistics about your local provide
system. This document explains each of the reported metrics in detail.
## Understanding the Metrics
The statistics are organized into three types of measurements:
### Per-worker rates
Metrics like "CIDs reprovided/min/worker" measure the throughput of a single
worker processing one region. To estimate total system throughput, multiply by
the number of active workers of that type (see [Workers stats](#workers-stats)).
Example: If "CIDs reprovided/min/worker" shows 100 and you have 10 active
periodic workers, your total reprovide throughput is approximately 1,000
CIDs/min.
### Per-region averages
Metrics like "Avg CIDs/reprovide" measure properties of the work units (keyspace
regions). These represent the average size or characteristics of a region, not a
rate. Do NOT multiply these by worker count.
Example: "Avg CIDs/reprovide: 250,000" means each region contains an average of
250,000 CIDs that get reprovided together as a batch.
### System totals
Metrics like "Total CIDs provided" are cumulative counts since node startup.
These aggregate all work across all workers over time.
## Connectivity
### Status
Current connectivity status (`online`, `disconnected`, or `offline`) and when
it last changed (see [provide connectivity
status](./config.md#providedhtofflinedelay)).
## Queues
### Provide queue
Number of CIDs waiting for initial provide, and the number of keyspace regions
they're grouped into.
### Reprovide queue
Number of regions with overdue reprovides. These regions missed their scheduled
reprovide time and will be processed as soon as possible. If decreasing, the
node is recovering from downtime. If increasing, either the node is offline or
the provide system needs more workers (see
[`Provide.DHT.MaxWorkers`](./config.md#providedhtmaxworkers)
and
[`Provide.DHT.DedicatedPeriodicWorkers`](./config.md#providedhtdedicatedperiodicworkers)).
## Schedule
### CIDs scheduled
Total CIDs scheduled for reprovide.
### Regions scheduled
Number of keyspace regions scheduled for reprovide. Each CID is mapped to a
specific region, and all CIDs within the same region are reprovided together as
a batch for efficient processing.
### Avg prefix length
Average length of binary prefixes identifying the scheduled regions. Each
keyspace region is identified by a binary prefix, and this shows the average
prefix length across all regions in the schedule. Longer prefixes indicate the
keyspace is divided into more regions (because there are more DHT servers in the
swarm to distribute records across).
### Next region prefix
Keyspace prefix of the next region to be reprovided.
### Next region reprovide
When the next region is scheduled to be reprovided.
## Timings
### Uptime
How long the provide system has been running since Kubo started, along with the
start timestamp.
### Current time offset
Elapsed time in the current reprovide cycle, showing cycle progress (e.g., '11h'
means 11 hours into a 22-hour cycle, roughly halfway through).
### Cycle started
When the current reprovide cycle began.
### Reprovide interval
How often each CID is reprovided (the complete cycle duration).
## Network
### Avg record holders
Average number of provider records successfully sent for each CID to distinct
DHT servers. In practice, this is often lower than the [replication
factor](#replication-factor) due to unreachable peers or timeouts. Matching the
replication factor would indicate all DHT servers are reachable.
Note: this counts successful sends; some DHT servers may have gone offline
afterward, so actual availability may be lower.
### Peers swept
Number of DHT servers to which we tried to send provider records in the last
reprovide cycle (sweep). Excludes peers contacted during initial provides or
DHT lookups.
### Full keyspace coverage
Whether provider records were sent to all DHT servers in the swarm during the
last reprovide cycle. If true, [peers swept](#peers-swept) approximates the
total DHT swarm size over the last [reprovide interval](#reprovide-interval).
### Reachable peers
Number and percentage of peers to which we successfully sent all provider
records assigned to them during the last reprovide cycle.
### Avg region size
Average number of DHT servers per keyspace region.
### Replication factor
Target number of DHT servers to receive each provider record.
## Operations
### Ongoing provides
Number of CIDs and regions currently being provided for the first time. More
CIDs than regions indicates efficient batching. Each region provide uses a
[burst
worker](./config.md#providedhtdedicatedburstworkers).
### Ongoing reprovides
Number of CIDs and regions currently being reprovided. Each region reprovide
uses a [periodic
worker](./config.md#providedhtdedicatedperiodicworkers).
### Total CIDs provided
Total number of provide operations since node startup (includes both provides
and reprovides).
### Total records provided
Total provider records successfully sent to DHT servers since startup (includes
reprovides).
### Total provide errors
Number of failed region provide/reprovide operations since startup. Failed
regions are automatically retried unless the node is offline.
### CIDs provided/min/worker
Average rate of initial provides per minute per worker during the last
reprovide cycle (excludes reprovides). Each worker handles one keyspace region
at a time, providing all CIDs in that region. This measures the throughput of a
single worker only.
To estimate total system provide throughput, multiply by the number of active
burst workers shown in [Workers stats](#workers-stats) (Burst > Active).
Note: This rate only counts active time when initial provides are being
processed. If workers are idle, actual throughput may be lower.
### CIDs reprovided/min/worker
Average rate of reprovides per minute per worker during the last reprovide
cycle (excludes initial provides). Each worker handles one keyspace region at a
time, reproviding all CIDs in that region. This measures the throughput of a
single worker only.
To estimate total system reprovide throughput, multiply by the number of active
periodic workers shown in [Workers stats](#workers-stats) (Periodic > Active).
Example: If this shows 100 CIDs/min and you have 10 active periodic workers,
your total reprovide throughput is approximately 1,000 CIDs/min.
Note: This rate only counts active time when regions are being reprovided. If
workers are idle due to network issues or queue exhaustion, actual throughput
may be lower.
### Region reprovide duration
Average time to reprovide all CIDs in a region during the last cycle.
### Avg CIDs/reprovide
Average number of CIDs per region during the last reprovide cycle.
This measures the average size of a region (how many CIDs are batched together),
not a throughput rate. Do NOT multiply this by worker count.
Combined with [Region reprovide duration](#region-reprovide-duration), this
helps estimate per-worker throughput: dividing Avg CIDs/reprovide by Region
reprovide duration gives CIDs/min/worker.
### Regions reprovided (last cycle)
Number of regions reprovided in the last cycle.
## Workers
### Active workers
Number of workers currently processing provide or reprovide operations.
### Free workers
Number of idle workers not reserved for periodic or burst tasks.
### Workers stats
Breakdown of worker status by type (periodic for scheduled reprovides, burst for
initial provides). For each type:
- **Active**: Currently processing operations (use this count when calculating total throughput from per-worker rates)
- **Dedicated**: Reserved for this type
- **Available**: Idle dedicated workers + [free workers](#free-workers)
- **Queued**: 0 or 1 (workers acquired only when needed)
The number of active workers determines your total system throughput. For
example, if you have 10 active periodic workers, multiply
[CIDs reprovided/min/worker](#cids-reprovidedminworker) by 10 to estimate total
reprovide throughput.
See [provide queue](#provide-queue) and [reprovide queue](#reprovide-queue) for
regions waiting to be processed.
### Max connections/worker
Maximum concurrent DHT server connections per worker when sending provider
records for a region.
## Capacity Planning
### Estimating if your system can keep up with the reprovide schedule
To check if your provide system has sufficient capacity:
1. Calculate required throughput:
- Required CIDs/min = [CIDs scheduled](#cids-scheduled) / ([Reprovide interval](#reprovide-interval) in minutes)
- Example: 67M CIDs / (22 hours × 60 min) = 50,758 CIDs/min needed
2. Calculate actual throughput:
- Actual CIDs/min = [CIDs reprovided/min/worker](#cids-reprovidedminworker) × Active periodic workers
- Example: 100 CIDs/min/worker × 256 active workers = 25,600 CIDs/min
3. Compare:
- If actual < required: System is underprovisioned, increase [MaxWorkers](./config.md#providedhtmaxworkers) or [DedicatedPeriodicWorkers](./config.md#providedhtdedicatedperiodicworkers)
- If actual > required: System has excess capacity
- If [Reprovide queue](#reprovide-queue) is growing: System is falling behind
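The same check as a small shell sketch, using the example numbers above (substitute the values reported by `ipfs provide stat --all`):
```sh
CIDS_SCHEDULED=67000000      # "CIDs scheduled"
INTERVAL_MIN=$((22 * 60))    # "Reprovide interval" in minutes
PER_WORKER=100               # "CIDs reprovided/min/worker"
ACTIVE_PERIODIC=256          # Workers stats -> Periodic -> Active
echo "required: $((CIDS_SCHEDULED / INTERVAL_MIN)) CIDs/min"
echo "actual:   $((PER_WORKER * ACTIVE_PERIODIC)) CIDs/min"
```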
### Understanding worker utilization
- High active workers with growing reprovide queue: Need more workers or network connectivity is limiting throughput
- Low active workers with non-empty reprovide queue: Workers may be waiting for network or DHT operations
- Check [Reachable peers](#reachable-peers) to diagnose network connectivity issues
## See Also
- [Provide configuration reference](./config.md#provide)
- [Provide metrics for Prometheus](./metrics.md#provide)

View File

@ -47,25 +47,51 @@ Or in your IPFS config file:
The telemetry plugin collects the following anonymized data:
### General Information
- **Agent version**: The version of Kubo being used.
- **Platform details**: Operating system, architecture, and container status.
- **Uptime**: How long the node has been running, categorized into buckets.
- **Repo size**: Categorized into buckets (e.g., 1GB, 5GB, 10GB, etc.).
- **UUID**: Anonymous identifier for this node
- **Agent version**: Kubo version string
- **Private network**: Whether running in a private IPFS network
- **Repository size**: Categorized into privacy-preserving buckets (1GB, 5GB, 10GB, 100GB, 500GB, 1TB, 10TB, >10TB)
- **Uptime**: Categorized into privacy-preserving buckets (1d, 2d, 3d, 7d, 14d, 30d, >30d)
### Routing & Discovery
- **Custom bootstrap peers**: Whether custom `Bootstrap` peers are configured
- **Routing type**: The `Routing.Type` configured for the node
- **Accelerated DHT client**: Whether `Routing.AcceleratedDHTClient` is enabled
- **Delegated routing count**: Number of `Routing.DelegatedRouters` configured
- **AutoConf enabled**: Whether `AutoConf.Enabled` is set
- **Custom AutoConf URL**: Whether custom `AutoConf.URL` is configured
- **mDNS**: Whether `Discovery.MDNS.Enabled` is set
### Content Providing
- **Provide and Reprovide strategy**: The `Provide.Strategy` configured
- **Sweep-based provider**: Whether `Provide.DHT.SweepEnabled` is set
- **Custom Interval**: Whether custom `Provide.DHT.Interval` is configured
- **Custom MaxWorkers**: Whether custom `Provide.DHT.MaxWorkers` is configured
### Network Configuration
- **Private network**: Whether the node is running in a private network.
- **Bootstrap peers**: Whether custom bootstrap peers are used.
- **Routing type**: Whether the node uses DHT, IPFS, or a custom routing setup.
- **AutoNAT settings**: Whether AutoNAT is enabled and its reachability status.
- **AutoConf settings**: Whether AutoConf is enabled and whether a custom URL is used.
- **Swarm settings**: Whether hole punching is enabled, and whether public IP addresses are used.
### TLS and Discovery
- **AutoTLS settings**: Whether WSS is enabled and whether a custom domain suffix is used.
- **Discovery settings**: Whether mDNS is enabled.
- **AutoNAT service mode**: The `AutoNAT.ServiceMode` configured
- **AutoNAT reachability**: Current reachability status determined by AutoNAT
- **Hole punching**: Whether `Swarm.EnableHolePunching` is enabled
- **Circuit relay addresses**: Whether the node advertises circuit relay addresses
- **Public IPv4 addresses**: Whether the node has public IPv4 addresses
- **Public IPv6 addresses**: Whether the node has public IPv6 addresses
- **AutoWSS**: Whether `AutoTLS.AutoWSS` is enabled
- **Custom domain suffix**: Whether custom `AutoTLS.DomainSuffix` is configured
### Reprovider Strategy
- The strategy used for reprovider (e.g., "all", "pinned"...).
### Platform Information
- **Operating system**: The OS the node is running on
- **CPU architecture**: The architecture the node is running on
- **Container detection**: Whether the node is running inside a container
- **VM detection**: Whether the node is running inside a virtual machine
### Code Reference
Data is organized in the `LogEvent` struct at [`plugin/plugins/telemetry/telemetry.go`](https://github.com/ipfs/kubo/blob/master/plugin/plugins/telemetry/telemetry.go). This struct is the authoritative source of truth for all telemetry data, including privacy-preserving buckets for repository size and uptime. Note that this documentation may not always be up-to-date - refer to the code for the current implementation.
---

View File

@ -528,13 +528,6 @@ func (d *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir
return nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
// to check that out Node implements all the interfaces we want.
type ipnsRoot interface {
fs.Node

34
go.mod
View File

@ -11,7 +11,7 @@ require (
github.com/cenkalti/backoff/v4 v4.3.0
github.com/ceramicnetwork/go-dag-jose v0.1.1
github.com/cheggaaa/pb v1.0.29
github.com/cockroachdb/pebble/v2 v2.1.0
github.com/cockroachdb/pebble/v2 v2.1.2
github.com/coreos/go-systemd/v22 v22.5.0
github.com/dustin/go-humanize v1.0.1
github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302
@ -22,9 +22,9 @@ require (
github.com/hashicorp/go-version v1.7.0
github.com/ipfs-shipyard/nopfs v0.0.14
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0
github.com/ipfs/boxo v0.35.1
github.com/ipfs/boxo v0.35.2
github.com/ipfs/go-block-format v0.2.3
github.com/ipfs/go-cid v0.5.0
github.com/ipfs/go-cid v0.6.0
github.com/ipfs/go-cidutil v0.1.0
github.com/ipfs/go-datastore v0.9.0
github.com/ipfs/go-detect-race v0.0.1
@ -32,14 +32,14 @@ require (
github.com/ipfs/go-ds-flatfs v0.5.5
github.com/ipfs/go-ds-leveldb v0.5.2
github.com/ipfs/go-ds-measure v0.2.2
github.com/ipfs/go-ds-pebble v0.5.3
github.com/ipfs/go-ds-pebble v0.5.7
github.com/ipfs/go-fs-lock v0.1.1
github.com/ipfs/go-ipfs-cmds v0.15.0
github.com/ipfs/go-ipld-cbor v0.2.1
github.com/ipfs/go-ipld-format v0.6.3
github.com/ipfs/go-ipld-git v0.1.1
github.com/ipfs/go-ipld-legacy v0.2.2
github.com/ipfs/go-log/v2 v2.8.2
github.com/ipfs/go-log/v2 v2.9.0
github.com/ipfs/go-metrics-interface v0.3.0
github.com/ipfs/go-metrics-prometheus v0.1.0
github.com/ipfs/go-test v0.2.3
@ -51,9 +51,9 @@ require (
github.com/jbenet/go-temp-err-catcher v0.1.0
github.com/julienschmidt/httprouter v1.3.0
github.com/libp2p/go-doh-resolver v0.5.0
github.com/libp2p/go-libp2p v0.43.0
github.com/libp2p/go-libp2p v0.45.0
github.com/libp2p/go-libp2p-http v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.35.1
github.com/libp2p/go-libp2p-kad-dht v0.36.0
github.com/libp2p/go-libp2p-kbucket v0.8.0
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/libp2p/go-libp2p-pubsub-router v0.6.0
@ -69,6 +69,7 @@ require (
github.com/multiformats/go-multihash v0.2.3
github.com/opentracing/opentracing-go v1.2.0
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/probe-lab/go-libdht v0.4.0
github.com/prometheus/client_golang v1.23.2
github.com/stretchr/testify v1.11.1
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
@ -87,11 +88,11 @@ require (
go.uber.org/dig v1.19.0
go.uber.org/fx v1.24.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.43.0
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b
golang.org/x/crypto v0.45.0
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546
golang.org/x/mod v0.29.0
golang.org/x/sync v0.17.0
golang.org/x/sys v0.37.0
golang.org/x/sync v0.18.0
golang.org/x/sys v0.38.0
google.golang.org/protobuf v1.36.10
)
@ -129,7 +130,7 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/gammazero/deque v1.2.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-kit/log v0.2.1 // indirect
@ -150,7 +151,7 @@ require (
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-dsqueue v0.1.0 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
@ -215,7 +216,6 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/probe-lab/go-libdht v0.3.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
@ -256,11 +256,11 @@ require (
go.uber.org/zap/exp v0.3.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect

70
go.sum
View File

@ -126,8 +126,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.0 h1:6KZvjSpWcEXZUvlLzTRC7T1A2G7r+bFskIzggklxixo=
github.com/cockroachdb/pebble/v2 v2.1.0/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
@ -203,8 +203,8 @@ github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIp
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU=
github.com/gammazero/deque v1.2.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
@ -358,8 +358,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.1 h1:MGL3aaaxnu/h9KKq+X/6FxapI/qlDmnRNk33U7tz/fQ=
github.com/ipfs/boxo v0.35.1/go.mod h1:/p1XZVp+Yzv78RuKjb3BESBYEQglRgDrWvmN5mFrsus=
github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8=
github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
@ -368,8 +368,8 @@ github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xg
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30=
github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ=
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
@ -388,10 +388,10 @@ github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI=
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.3 h1:4esRt82+LkenUnIWyUCghR1gzRfqeCYGGKX/hRmabro=
github.com/ipfs/go-ds-pebble v0.5.3/go.mod h1:pn2bxYkAE7JRkbAF7D8xuEEFD3oOQ7QqQZPWkAVBs58=
github.com/ipfs/go-dsqueue v0.1.0 h1:OrahKDtT/Q+iMgKaM9XWdxrYPVASFpTuLah8QpKjboc=
github.com/ipfs/go-dsqueue v0.1.0/go.mod h1:iLNkodSOSKTLn0gCvL9ikArz5rZfNh8F9/BRvHe7RbY=
github.com/ipfs/go-ds-pebble v0.5.7 h1:4PQI46y3fjjxUTgHwYqcOVyoxiU6v1sqN6ONeRXGQTM=
github.com/ipfs/go-ds-pebble v0.5.7/go.mod h1:rsIgXE2qN+VfHKBin2cOOGFTZ/Agor6i8wBWA6ihbr0=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
@ -416,8 +416,8 @@ github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYD
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log/v2 v2.8.2 h1:nVG4nNHUwwI/sTs9Bi5iE8sXFQwXs3AjkkuWhg7+Y2I=
github.com/ipfs/go-log/v2 v2.8.2/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk=
github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU=
github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY=
github.com/ipfs/go-metrics-prometheus v0.1.0 h1:bApWOHkrH3VTBHzTHrZSfq4n4weOZDzZFxUXv+HyKcA=
@ -504,8 +504,8 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p v0.45.0 h1:Pdhr2HsFXaYjtfiNcBP4CcRUONvbMFdH3puM9vV4Tiw=
github.com/libp2p/go-libp2p v0.45.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
@ -514,8 +514,8 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk
github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA=
github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc=
github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg=
github.com/libp2p/go-libp2p-kad-dht v0.35.1 h1:RQglhc9OxqDwlFFdhQMwKxIPBIBfGsleROnK5hqVsoE=
github.com/libp2p/go-libp2p-kad-dht v0.35.1/go.mod h1:1oCXzkkBiYh3d5cMWLpInSOZ6am2AlpC4G+GDcZFcE0=
github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8=
github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
@ -551,6 +551,8 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -730,8 +732,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/probe-lab/go-libdht v0.3.0 h1:Q3ZXK8wCjZvgeHSTtRrppXobXY/KHPLZJfc+cdTTyqA=
github.com/probe-lab/go-libdht v0.3.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo=
github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@ -1005,8 +1007,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1017,8 +1019,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1095,8 +1097,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1123,8 +1125,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1192,8 +1194,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -1204,8 +1206,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1217,8 +1219,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@ -78,6 +78,7 @@ var uptimeBuckets = []time.Duration{
}
// A LogEvent is the object sent to the telemetry endpoint.
// See https://github.com/ipfs/kubo/blob/master/docs/telemetry.md for details.
type LogEvent struct {
UUID string `json:"uuid"`
@ -91,7 +92,10 @@ type LogEvent struct {
UptimeBucket time.Duration `json:"uptime_bucket"`
ReproviderStrategy string `json:"reprovider_strategy"`
ReproviderStrategy string `json:"reprovider_strategy"`
ProvideDHTSweepEnabled bool `json:"provide_dht_sweep_enabled"`
ProvideDHTIntervalCustom bool `json:"provide_dht_interval_custom"`
ProvideDHTMaxWorkersCustom bool `json:"provide_dht_max_workers_custom"`
RoutingType string `json:"routing_type"`
RoutingAcceleratedDHTClient bool `json:"routing_accelerated_dht_client"`
@ -352,6 +356,7 @@ func (p *telemetryPlugin) Start(n *core.IpfsNode) error {
func (p *telemetryPlugin) prepareEvent() {
p.collectBasicInfo()
p.collectRoutingInfo()
p.collectProvideInfo()
p.collectAutoNATInfo()
p.collectAutoConfInfo()
p.collectSwarmInfo()
@ -360,13 +365,6 @@ func (p *telemetryPlugin) prepareEvent() {
p.collectPlatformInfo()
}
// Collects:
// * AgentVersion
// * PrivateNetwork
// * RepoSizeBucket
// * BootstrappersCustom
// * UptimeBucket
// * ReproviderStrategy
func (p *telemetryPlugin) collectBasicInfo() {
p.event.AgentVersion = ipfs.GetUserAgentVersion()
@ -406,8 +404,6 @@ func (p *telemetryPlugin) collectBasicInfo() {
break
}
p.event.UptimeBucket = uptimeBucket
p.event.ReproviderStrategy = p.config.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
}
func (p *telemetryPlugin) collectRoutingInfo() {
@ -416,6 +412,13 @@ func (p *telemetryPlugin) collectRoutingInfo() {
p.event.RoutingDelegatedCount = len(p.config.Routing.DelegatedRouters)
}
func (p *telemetryPlugin) collectProvideInfo() {
p.event.ReproviderStrategy = p.config.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
p.event.ProvideDHTSweepEnabled = p.config.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled)
p.event.ProvideDHTIntervalCustom = !p.config.Provide.DHT.Interval.IsDefault()
p.event.ProvideDHTMaxWorkersCustom = !p.config.Provide.DHT.MaxWorkers.IsDefault()
}
type reachabilityHost interface {
Reachability() network.Reachability
}

View File

@ -2,6 +2,7 @@ package common
import (
"fmt"
"maps"
"strings"
)
@ -65,9 +66,9 @@ func MapSetKV(v map[string]interface{}, key string, value interface{}) error {
// child maps until a non-map value is found.
func MapMergeDeep(left, right map[string]interface{}) map[string]interface{} {
// We want to alter a copy of the map, not the original
result := make(map[string]interface{})
for k, v := range left {
result[k] = v
result := maps.Clone(left)
if result == nil {
result = make(map[string]interface{})
}
for key, rightVal := range right {

View File

@ -6,6 +6,8 @@ import (
"errors"
"fmt"
"net/http"
"path"
"strings"
drclient "github.com/ipfs/boxo/routing/http/client"
"github.com/ipfs/boxo/routing/http/contentrouter"
@ -24,6 +26,7 @@ import (
"github.com/libp2p/go-libp2p/core/routing"
ma "github.com/multiformats/go-multiaddr"
"go.opencensus.io/stats/view"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
var log = logging.Logger("routing/delegated")
@ -187,8 +190,27 @@ func httpRoutingFromConfig(conf config.Router, extraHTTP *ExtraHTTPParams) (rout
delegateHTTPClient := &http.Client{
Transport: &drclient.ResponseBodyLimitedTransport{
RoundTripper: transport,
LimitBytes: 1 << 20,
RoundTripper: otelhttp.NewTransport(transport,
otelhttp.WithSpanNameFormatter(func(operation string, req *http.Request) string {
if req.Method == http.MethodGet {
switch {
case strings.HasPrefix(req.URL.Path, "/routing/v1/providers"):
return "DelegatedHTTPClient.FindProviders"
case strings.HasPrefix(req.URL.Path, "/routing/v1/peers"):
return "DelegatedHTTPClient.FindPeers"
case strings.HasPrefix(req.URL.Path, "/routing/v1/ipns"):
return "DelegatedHTTPClient.GetIPNS"
}
} else if req.Method == http.MethodPut {
switch {
case strings.HasPrefix(req.URL.Path, "/routing/v1/ipns"):
return "DelegatedHTTPClient.PutIPNS"
}
}
return "DelegatedHTTPClient." + path.Dir(req.URL.Path)
}),
),
LimitBytes: 1 << 20,
},
}

View File

@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"
"testing"
"time"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/config"
@ -15,6 +16,19 @@ import (
"github.com/stretchr/testify/require"
)
// waitForLogMessage polls a buffer for a log message, waiting up to timeout duration.
// Returns true if message found, false if timeout reached.
func waitForLogMessage(buffer *harness.Buffer, message string, timeout time.Duration) bool {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
if strings.Contains(buffer.String(), message) {
return true
}
time.Sleep(100 * time.Millisecond)
}
return false
}
func TestAdd(t *testing.T) {
t.Parallel()
@ -435,7 +449,182 @@ func TestAdd(t *testing.T) {
require.Equal(t, 992, len(root.Links))
})
})
}
func TestAddFastProvide(t *testing.T) {
t.Parallel()
const (
shortString = "hello world"
shortStringCidV0 = "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD" // cidv0 - dag-pb - sha2-256
)
t.Run("fast-provide-root disabled via config: verify skipped in logs", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.False
})
// Start daemon with debug logging
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString)
require.Equal(t, shortStringCidV0, cidStr)
// Verify fast-provide-root was disabled
daemonLog := node.Daemon.Stderr.String()
require.Contains(t, daemonLog, "fast-provide-root: skipped")
})
t.Run("fast-provide-root enabled with wait=false: verify async provide", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Use default config (FastProvideRoot=true, FastProvideWait=false)
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString)
require.Equal(t, shortStringCidV0, cidStr)
daemonLog := node.Daemon.Stderr
// Should see async mode started
require.Contains(t, daemonLog.String(), "fast-provide-root: enabled")
require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously")
// Wait for async completion or failure (up to 11 seconds - slightly more than fastProvideTimeout)
// In test environment with no DHT peers, this will fail with "failed to find any peer in table"
completedOrFailed := waitForLogMessage(daemonLog, "async provide completed", 11*time.Second) ||
waitForLogMessage(daemonLog, "async provide failed", 11*time.Second)
require.True(t, completedOrFailed, "async provide should complete or fail within timeout")
})
t.Run("fast-provide-root enabled with wait=true: verify sync provide", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideWait = config.True
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Use Runner.Run with stdin to allow for expected errors
res := node.Runner.Run(harness.RunRequest{
Path: node.IPFSBin,
Args: []string{"add", "-q"},
CmdOpts: []harness.CmdOpt{
harness.RunWithStdin(strings.NewReader(shortString)),
},
})
// In sync mode (wait=true), provide errors propagate and fail the command.
// Test environment uses 'test' profile with no bootstrappers, and CI has
// insufficient peers for proper DHT puts, so we expect this to fail with
// "failed to find any peer in table" error from the DHT.
require.Equal(t, 1, res.ExitCode())
require.Contains(t, res.Stderr.String(), "Error: fast-provide: failed to find any peer in table")
daemonLog := node.Daemon.Stderr.String()
// Should see sync mode started
require.Contains(t, daemonLog, "fast-provide-root: enabled")
require.Contains(t, daemonLog, "fast-provide-root: providing synchronously")
require.Contains(t, daemonLog, "sync provide failed") // Verify the failure was logged
})
t.Run("fast-provide-wait ignored when root disabled", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.False
cfg.Import.FastProvideWait = config.True
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString)
require.Equal(t, shortStringCidV0, cidStr)
daemonLog := node.Daemon.Stderr.String()
require.Contains(t, daemonLog, "fast-provide-root: skipped")
require.Contains(t, daemonLog, "wait-flag-ignored")
})
t.Run("CLI flag overrides config: flag=true overrides config=false", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.False
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString, "--fast-provide-root=true")
require.Equal(t, shortStringCidV0, cidStr)
daemonLog := node.Daemon.Stderr
// Flag should enable it despite config saying false
require.Contains(t, daemonLog.String(), "fast-provide-root: enabled")
require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously")
})
t.Run("CLI flag overrides config: flag=false overrides config=true", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.True
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString, "--fast-provide-root=false")
require.Equal(t, shortStringCidV0, cidStr)
daemonLog := node.Daemon.Stderr.String()
// Flag should disable it despite config saying true
require.Contains(t, daemonLog, "fast-provide-root: skipped")
})
}
// createDirectoryForHAMT aims to create enough files with long names for the directory block to be close to the UnixFSHAMTDirectorySizeThreshold.

View File

@ -0,0 +1,164 @@
package cli
import (
"strings"
"testing"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/tidwall/sjson"
)
func TestConfigSecrets(t *testing.T) {
t.Parallel()
t.Run("Identity.PrivKey protection", func(t *testing.T) {
t.Parallel()
t.Run("Identity.PrivKey is concealed in config show", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Read the actual config file to get the real PrivKey
configFile := node.ReadFile(node.ConfigFile())
assert.Contains(t, configFile, "PrivKey")
// config show should NOT contain the PrivKey
configShow := node.RunIPFS("config", "show").Stdout.String()
assert.NotContains(t, configShow, "PrivKey")
})
t.Run("Identity.PrivKey cannot be read via ipfs config", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Attempting to read Identity.PrivKey should fail
res := node.RunIPFS("config", "Identity.PrivKey")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "cannot show or change private key")
})
t.Run("Identity.PrivKey cannot be read via ipfs config Identity", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Attempting to read Identity section should fail (it contains PrivKey)
res := node.RunIPFS("config", "Identity")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "cannot show or change private key")
})
t.Run("Identity.PrivKey cannot be set via config replace", func(t *testing.T) {
t.Parallel()
// Key rotation must be done in offline mode via the dedicated `ipfs key rotate` command.
// This test ensures PrivKey cannot be changed via config replace.
node := harness.NewT(t).NewNode().Init()
configShow := node.RunIPFS("config", "show").Stdout.String()
// Try to inject a PrivKey via config replace
configJSON := MustVal(sjson.Set(configShow, "Identity.PrivKey", "CAASqAkwggSkAgEAAo"))
node.WriteBytes("new-config", []byte(configJSON))
res := node.RunIPFS("config", "replace", "new-config")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "setting private key")
})
t.Run("Identity.PrivKey is preserved when re-injecting config", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Read the original config file
originalConfig := node.ReadFile(node.ConfigFile())
assert.Contains(t, originalConfig, "PrivKey")
// Extract the PrivKey value for comparison
var origPrivKey string
assert.Contains(t, originalConfig, "PrivKey")
// Simple extraction - find the PrivKey line
for _, line := range strings.Split(originalConfig, "\n") {
if strings.Contains(line, "\"PrivKey\":") {
origPrivKey = line
break
}
}
assert.NotEmpty(t, origPrivKey)
// Get config show output (which should NOT contain PrivKey)
configShow := node.RunIPFS("config", "show").Stdout.String()
assert.NotContains(t, configShow, "PrivKey")
// Re-inject the config via config replace
node.WriteBytes("config-show", []byte(configShow))
node.IPFS("config", "replace", "config-show")
// The PrivKey should still be in the config file
newConfig := node.ReadFile(node.ConfigFile())
assert.Contains(t, newConfig, "PrivKey")
// Verify the PrivKey line is the same
var newPrivKey string
for _, line := range strings.Split(newConfig, "\n") {
if strings.Contains(line, "\"PrivKey\":") {
newPrivKey = line
break
}
}
assert.Equal(t, origPrivKey, newPrivKey, "PrivKey should be preserved")
})
})
t.Run("TLS security validation", func(t *testing.T) {
t.Parallel()
t.Run("AutoConf.TLSInsecureSkipVerify defaults to false", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Check the default value in a fresh init
res := node.RunIPFS("config", "AutoConf.TLSInsecureSkipVerify")
// Field may not exist (exit code 1) or be false/empty (exit code 0)
// Both are acceptable as they mean "not true"
output := res.Stdout.String()
assert.NotContains(t, output, "true", "default should not be true")
})
t.Run("AutoConf.TLSInsecureSkipVerify can be set to true", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Set to true
node.IPFS("config", "AutoConf.TLSInsecureSkipVerify", "true", "--json")
// Verify it was set
res := node.RunIPFS("config", "AutoConf.TLSInsecureSkipVerify")
assert.Equal(t, 0, res.ExitCode())
assert.Contains(t, res.Stdout.String(), "true")
})
t.Run("HTTPRetrieval.TLSInsecureSkipVerify defaults to false", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Check the default value in a fresh init
res := node.RunIPFS("config", "HTTPRetrieval.TLSInsecureSkipVerify")
// Field may not exist (exit code 1) or be false/empty (exit code 0)
// Both are acceptable as they mean "not true"
output := res.Stdout.String()
assert.NotContains(t, output, "true", "default should not be true")
})
t.Run("HTTPRetrieval.TLSInsecureSkipVerify can be set to true", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Set to true
node.IPFS("config", "HTTPRetrieval.TLSInsecureSkipVerify", "true", "--json")
// Verify it was set
res := node.RunIPFS("config", "HTTPRetrieval.TLSInsecureSkipVerify")
assert.Equal(t, 0, res.ExitCode())
assert.Contains(t, res.Stdout.String(), "true")
})
})
}

View File

@ -5,10 +5,13 @@ import (
"io"
"os"
"testing"
"time"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
@ -102,3 +105,200 @@ func TestDag(t *testing.T) {
assert.Equal(t, content, stat.Stdout.Bytes())
})
}
func TestDagImportFastProvide(t *testing.T) {
t.Parallel()
t.Run("fast-provide-root disabled via config: verify skipped in logs", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.False
})
// Start daemon with debug logging
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Import CAR file
r, err := os.Open(fixtureFile)
require.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid)
require.NoError(t, err)
// Verify fast-provide-root was disabled
daemonLog := node.Daemon.Stderr.String()
require.Contains(t, daemonLog, "fast-provide-root: skipped")
})
t.Run("fast-provide-root enabled with wait=false: verify async provide", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Use default config (FastProvideRoot=true, FastProvideWait=false)
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Import CAR file
r, err := os.Open(fixtureFile)
require.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid)
require.NoError(t, err)
daemonLog := node.Daemon.Stderr
// Should see async mode started
require.Contains(t, daemonLog.String(), "fast-provide-root: enabled")
require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously")
require.Contains(t, daemonLog.String(), fixtureCid) // Should log the specific CID being provided
// Wait for async completion or failure (slightly more than DefaultFastProvideTimeout)
// In test environment with no DHT peers, this will fail with "failed to find any peer in table"
timeout := config.DefaultFastProvideTimeout + time.Second
completedOrFailed := waitForLogMessage(daemonLog, "async provide completed", timeout) ||
waitForLogMessage(daemonLog, "async provide failed", timeout)
require.True(t, completedOrFailed, "async provide should complete or fail within timeout")
})
t.Run("fast-provide-root enabled with wait=true: verify sync provide", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideWait = config.True
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Import CAR file - use Run instead of IPFSDagImport to handle expected error
r, err := os.Open(fixtureFile)
require.NoError(t, err)
defer r.Close()
res := node.Runner.Run(harness.RunRequest{
Path: node.IPFSBin,
Args: []string{"dag", "import", "--pin-roots=false"},
CmdOpts: []harness.CmdOpt{
harness.RunWithStdin(r),
},
})
// In sync mode (wait=true), provide errors propagate and fail the command.
// Test environment uses 'test' profile with no bootstrappers, and CI has
// insufficient peers for proper DHT puts, so we expect this to fail with
// "failed to find any peer in table" error from the DHT.
require.Equal(t, 1, res.ExitCode())
require.Contains(t, res.Stderr.String(), "Error: fast-provide: failed to find any peer in table")
daemonLog := node.Daemon.Stderr.String()
// Should see sync mode started
require.Contains(t, daemonLog, "fast-provide-root: enabled")
require.Contains(t, daemonLog, "fast-provide-root: providing synchronously")
require.Contains(t, daemonLog, fixtureCid) // Should log the specific CID being provided
require.Contains(t, daemonLog, "sync provide failed") // Verify the failure was logged
})
t.Run("fast-provide-wait ignored when root disabled", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.False
cfg.Import.FastProvideWait = config.True
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Import CAR file
r, err := os.Open(fixtureFile)
require.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid)
require.NoError(t, err)
daemonLog := node.Daemon.Stderr.String()
require.Contains(t, daemonLog, "fast-provide-root: skipped")
// Note: dag import doesn't log wait-flag-ignored like add does
})
t.Run("CLI flag overrides config: flag=true overrides config=false", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.False
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Import CAR file with flag override
r, err := os.Open(fixtureFile)
require.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid, "--fast-provide-root=true")
require.NoError(t, err)
daemonLog := node.Daemon.Stderr
// Flag should enable it despite config saying false
require.Contains(t, daemonLog.String(), "fast-provide-root: enabled")
require.Contains(t, daemonLog.String(), "fast-provide-root: providing asynchronously")
require.Contains(t, daemonLog.String(), fixtureCid) // Should log the specific CID being provided
})
t.Run("CLI flag overrides config: flag=false overrides config=true", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.FastProvideRoot = config.True
})
node.StartDaemonWithReq(harness.RunRequest{
CmdOpts: []harness.CmdOpt{
harness.RunWithEnv(map[string]string{
"GOLOG_LOG_LEVEL": "error,core/commands=debug,core/commands/cmdenv=debug",
}),
},
}, "")
defer node.StopDaemon()
// Import CAR file with flag override
r, err := os.Open(fixtureFile)
require.NoError(t, err)
defer r.Close()
err = node.IPFSDagImport(r, fixtureCid, "--fast-provide-root=false")
require.NoError(t, err)
daemonLog := node.Daemon.Stderr.String()
// Flag should disable it despite config saying true
require.Contains(t, daemonLog, "fast-provide-root: skipped")
})
}

View File

@ -72,9 +72,9 @@ func TestRoutingV1Proxy(t *testing.T) {
cidStr := nodes[0].IPFSAddStr(string(random.Bytes(1000)))
// Reprovide as initialProviderDelay still ongoing
res := nodes[0].IPFS("routing", "reprovide")
require.NoError(t, res.Err)
res = nodes[1].IPFS("routing", "findprovs", cidStr)
waitUntilProvidesComplete(t, nodes[0])
res := nodes[1].IPFS("routing", "findprovs", cidStr)
assert.Equal(t, nodes[0].PeerID().String(), res.Stdout.Trimmed())
})

View File

@ -14,7 +14,6 @@ import (
"github.com/ipfs/kubo/test/cli/harness"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRoutingV1Server(t *testing.T) {
@ -39,9 +38,7 @@ func TestRoutingV1Server(t *testing.T) {
text := "hello world " + uuid.New().String()
cidStr := nodes[2].IPFSAddStr(text)
_ = nodes[3].IPFSAddStr(text)
// Reprovide as initialProviderDelay still ongoing
res := nodes[3].IPFS("routing", "reprovide")
require.NoError(t, res.Err)
waitUntilProvidesComplete(t, nodes[3])
cid, err := cid.Decode(cidStr)
assert.NoError(t, err)

View File

@ -17,6 +17,8 @@ func TestDHTOptimisticProvide(t *testing.T) {
nodes[0].UpdateConfig(func(cfg *config.Config) {
cfg.Experimental.OptimisticProvide = true
// Optimistic provide only works with the legacy provider.
cfg.Provide.DHT.SweepEnabled = config.False
})
nodes.StartDaemons().Connect()

View File

@ -1,11 +1,16 @@
package cli
import (
"bufio"
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"strings"
"testing"
"time"
"github.com/ipfs/kubo/test/cli/harness"
. "github.com/ipfs/kubo/test/cli/testutils"
@ -599,6 +604,164 @@ func TestLogLevel(t *testing.T) {
})
})
// Constants for slog interop tests
const (
slogTestLogTailTimeout = 10 * time.Second
slogTestLogWaitTimeout = 5 * time.Second
slogTestLogStartupDelay = 1 * time.Second // Wait for log tail to start
slogTestSubsystemCmdsHTTP = "cmds/http" // Native go-log subsystem
slogTestSubsystemNetIdentify = "net/identify" // go-libp2p slog subsystem
)
// logMatch represents a matched log entry for slog interop tests
type logMatch struct {
subsystem string
line string
}
// startLogMonitoring starts ipfs log tail and returns command and channel for matched logs.
startLogMonitoring := func(t *testing.T, node *harness.Node) (*exec.Cmd, chan logMatch) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), slogTestLogTailTimeout)
t.Cleanup(cancel)
cmd := exec.CommandContext(ctx, node.IPFSBin, "log", "tail")
cmd.Env = append([]string(nil), os.Environ()...)
for k, v := range node.Runner.Env {
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
}
cmd.Dir = node.Runner.Dir
stdout, err := cmd.StdoutPipe()
require.NoError(t, err)
require.NoError(t, cmd.Start())
matches := make(chan logMatch, 10)
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
line := scanner.Text()
// Check for actual logger field in JSON, not just substring match
if strings.Contains(line, `"logger":"cmds/http"`) {
matches <- logMatch{slogTestSubsystemCmdsHTTP, line}
}
if strings.Contains(line, `"logger":"net/identify"`) {
matches <- logMatch{slogTestSubsystemNetIdentify, line}
}
}
}()
return cmd, matches
}
// waitForBothSubsystems waits for both native go-log and slog subsystems to appear in logs.
waitForBothSubsystems := func(t *testing.T, matches chan logMatch, timeout time.Duration) {
t.Helper()
seen := make(map[string]struct{})
deadline := time.After(timeout)
for len(seen) < 2 {
select {
case match := <-matches:
if _, exists := seen[match.subsystem]; !exists {
t.Logf("Found %s log", match.subsystem)
seen[match.subsystem] = struct{}{}
}
case <-deadline:
t.Fatalf("Timeout waiting for logs. Seen: %v", seen)
}
}
assert.Contains(t, seen, slogTestSubsystemCmdsHTTP, "should see cmds/http (native go-log)")
assert.Contains(t, seen, slogTestSubsystemNetIdentify, "should see net/identify (slog from go-libp2p)")
}
// triggerIdentifyProtocol connects node1 to node2, triggering net/identify logs.
triggerIdentifyProtocol := func(t *testing.T, node1, node2 *harness.Node) {
t.Helper()
// Get node2's peer ID and address
node2ID := node2.PeerID().String()
addrsRes := node2.IPFS("id", "-f", "<addrs>")
require.NoError(t, addrsRes.Err)
addrs := strings.Split(strings.TrimSpace(addrsRes.Stdout.String()), "\n")
require.NotEmpty(t, addrs, "node2 should have at least one address")
// Connect node1 to node2
multiaddr := fmt.Sprintf("%s/p2p/%s", addrs[0], node2ID)
res := node1.IPFS("swarm", "connect", multiaddr)
require.NoError(t, res.Err)
}
// verifySlogInterop verifies that both native go-log and slog from go-libp2p
// appear in ipfs log tail with correct formatting and level control.
verifySlogInterop := func(t *testing.T, node1, node2 *harness.Node) {
t.Helper()
cmd, matches := startLogMonitoring(t, node1)
defer func() {
_ = cmd.Process.Kill()
}()
time.Sleep(slogTestLogStartupDelay)
// Trigger cmds/http (native go-log)
node1.IPFS("version")
// Trigger net/identify (slog from go-libp2p)
triggerIdentifyProtocol(t, node1, node2)
waitForBothSubsystems(t, matches, slogTestLogWaitTimeout)
}
// This test verifies that go-log's slog bridge works with go-libp2p's gologshim
// when log levels are set via GOLOG_LOG_LEVEL environment variable.
// It tests both native go-log loggers (cmds/http) and slog-based loggers from
// go-libp2p (net/identify), ensuring both types appear in `ipfs log tail`.
t.Run("slog interop via env var", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node1 := h.NewNode().Init()
node1.Runner.Env["GOLOG_LOG_LEVEL"] = "error,cmds/http=debug,net/identify=debug"
node1.StartDaemon()
defer node1.StopDaemon()
node2 := h.NewNode().Init().StartDaemon()
defer node2.StopDaemon()
verifySlogInterop(t, node1, node2)
})
// This test verifies that go-log's slog bridge works with go-libp2p's gologshim
// when log levels are set dynamically via `ipfs log level` CLI commands.
// It tests the key feature that SetLogLevel auto-creates level entries for subsystems
// that don't exist yet, enabling `ipfs log level net/identify debug` to work even
// before the net/identify logger is created. This is critical for slog interop.
t.Run("slog interop via CLI", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node1 := h.NewNode().Init().StartDaemon()
defer node1.StopDaemon()
node2 := h.NewNode().Init().StartDaemon()
defer node2.StopDaemon()
// Set levels via CLI for both subsystems BEFORE triggering events
res := node1.IPFS("log", "level", slogTestSubsystemCmdsHTTP, "debug")
require.NoError(t, res.Err)
res = node1.IPFS("log", "level", slogTestSubsystemNetIdentify, "debug")
require.NoError(t, res.Err) // Auto-creates level entry for slog subsystem
verifySlogInterop(t, node1, node2)
})
}
func getExpectedSubsystems(t *testing.T, node *harness.Node) []string {

View File

@ -0,0 +1,524 @@
package cli
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
provideStatEventuallyTimeout = 15 * time.Second
provideStatEventuallyTick = 100 * time.Millisecond
)
// sweepStats mirrors the subset of JSON fields actually used by tests.
// This type is intentionally independent from upstream types to detect breaking changes.
// Only includes fields that tests actually access to keep it simple and maintainable.
type sweepStats struct {
Sweep struct {
Closed bool `json:"closed"`
Connectivity struct {
Status string `json:"status"`
} `json:"connectivity"`
Queues struct {
PendingKeyProvides int `json:"pending_key_provides"`
} `json:"queues"`
Schedule struct {
Keys int `json:"keys"`
} `json:"schedule"`
} `json:"Sweep"`
}
// parseSweepStats parses JSON output from ipfs provide stat command.
// Tests will naturally fail if upstream removes/renames fields we depend on.
func parseSweepStats(t *testing.T, jsonOutput string) sweepStats {
t.Helper()
var stats sweepStats
err := json.Unmarshal([]byte(jsonOutput), &stats)
require.NoError(t, err, "failed to parse provide stat JSON output")
return stats
}
// TestProvideStatAllMetricsDocumented verifies that all metrics output by
// `ipfs provide stat --all` are documented in docs/provide-stats.md.
//
// The test works as follows:
// 1. Starts an IPFS node with Provide.DHT.SweepEnabled=true
// 2. Runs `ipfs provide stat --all` to get all metrics
// 3. Parses the output and extracts all lines with exactly 2 spaces indent
// (these are the actual metric lines)
// 4. Reads docs/provide-stats.md and extracts all ### section headers
// 5. Ensures every metric in the output has a corresponding ### section in the docs
func TestProvideStatAllMetricsDocumented(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
// Enable sweep provider
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
// Run `ipfs provide stat --all` to get all metrics
res := node.IPFS("provide", "stat", "--all")
require.NoError(t, res.Err)
// Parse metrics from the command output
// Only consider lines with exactly two spaces of padding ("  ")
// These are the actual metric lines as shown in provide.go
outputMetrics := make(map[string]bool)
scanner := bufio.NewScanner(strings.NewReader(res.Stdout.String()))
// Only consider lines that start with exactly two spaces
indent := "  "
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, indent) || strings.HasPrefix(line, indent+" ") {
continue
}
// Remove the indent
line = strings.TrimPrefix(line, indent)
// Extract metric name - everything before the first ':'
parts := strings.SplitN(line, ":", 2)
if len(parts) >= 1 {
metricName := strings.TrimSpace(parts[0])
if metricName != "" {
outputMetrics[metricName] = true
}
}
}
require.NoError(t, scanner.Err())
// Read docs/provide-stats.md
// Find the repo root by looking for go.mod
repoRoot := ".."
for range 6 {
if _, err := os.Stat(filepath.Join(repoRoot, "go.mod")); err == nil {
break
}
repoRoot = filepath.Join("..", repoRoot)
}
docsPath := filepath.Join(repoRoot, "docs", "provide-stats.md")
docsFile, err := os.Open(docsPath)
require.NoError(t, err, "Failed to open provide-stats.md")
defer docsFile.Close()
// Parse all ### metric headers from the docs
documentedMetrics := make(map[string]bool)
docsScanner := bufio.NewScanner(docsFile)
for docsScanner.Scan() {
line := docsScanner.Text()
if metricName, found := strings.CutPrefix(line, "### "); found {
metricName = strings.TrimSpace(metricName)
documentedMetrics[metricName] = true
}
}
require.NoError(t, docsScanner.Err())
// Check that all output metrics are documented
var undocumentedMetrics []string
for metric := range outputMetrics {
if !documentedMetrics[metric] {
undocumentedMetrics = append(undocumentedMetrics, metric)
}
}
require.Empty(t, undocumentedMetrics,
"The following metrics from 'ipfs provide stat --all' are not documented in docs/provide-stats.md: %v\n"+
"All output metrics: %v\n"+
"Documented metrics: %v",
undocumentedMetrics, outputMetrics, documentedMetrics)
}
// TestProvideStatBasic tests basic functionality of ipfs provide stat
func TestProvideStatBasic(t *testing.T) {
t.Parallel()
t.Run("works with Sweep provider and shows brief output", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.IPFS("provide", "stat")
require.NoError(t, res.Err)
assert.Empty(t, res.Stderr.String())
output := res.Stdout.String()
// Brief output should contain specific full labels
assert.Contains(t, output, "Provide queue:")
assert.Contains(t, output, "Reprovide queue:")
assert.Contains(t, output, "CIDs scheduled:")
assert.Contains(t, output, "Regions scheduled:")
assert.Contains(t, output, "Avg record holders:")
assert.Contains(t, output, "Ongoing provides:")
assert.Contains(t, output, "Ongoing reprovides:")
assert.Contains(t, output, "Total CIDs provided:")
})
t.Run("requires daemon to be online", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
res := node.RunIPFS("provide", "stat")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "this command must be run in online mode")
})
}
// TestProvideStatFlags tests various command flags
func TestProvideStatFlags(t *testing.T) {
t.Parallel()
t.Run("--all flag shows all sections with headings", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.IPFS("provide", "stat", "--all")
require.NoError(t, res.Err)
output := res.Stdout.String()
// Should contain section headings with colons
assert.Contains(t, output, "Connectivity:")
assert.Contains(t, output, "Queues:")
assert.Contains(t, output, "Schedule:")
assert.Contains(t, output, "Timings:")
assert.Contains(t, output, "Network:")
assert.Contains(t, output, "Operations:")
assert.Contains(t, output, "Workers:")
// Should contain detailed metrics not in brief mode
assert.Contains(t, output, "Uptime:")
assert.Contains(t, output, "Cycle started:")
assert.Contains(t, output, "Reprovide interval:")
assert.Contains(t, output, "Peers swept:")
assert.Contains(t, output, "Full keyspace coverage:")
})
t.Run("--compact requires --all", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("provide", "stat", "--compact")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "--compact requires --all flag")
})
t.Run("--compact with --all shows 2-column layout", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.IPFS("provide", "stat", "--all", "--compact")
require.NoError(t, res.Err)
output := res.Stdout.String()
lines := strings.Split(strings.TrimSpace(output), "\n")
require.NotEmpty(t, lines)
// In compact mode, find a line that has both Schedule and Connectivity metrics
// This confirms 2-column layout is working
foundTwoColumns := false
for _, line := range lines {
if strings.Contains(line, "CIDs scheduled:") && strings.Contains(line, "Status:") {
foundTwoColumns = true
break
}
}
assert.True(t, foundTwoColumns, "Should have at least one line with both 'CIDs scheduled:' and 'Status:' confirming 2-column layout")
})
t.Run("individual section flags work with full labels", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
testCases := []struct {
flag string
contains []string
}{
{
flag: "--connectivity",
contains: []string{"Status:"},
},
{
flag: "--queues",
contains: []string{"Provide queue:", "Reprovide queue:"},
},
{
flag: "--schedule",
contains: []string{"CIDs scheduled:", "Regions scheduled:", "Avg prefix length:", "Next region prefix:", "Next region reprovide:"},
},
{
flag: "--timings",
contains: []string{"Uptime:", "Current time offset:", "Cycle started:", "Reprovide interval:"},
},
{
flag: "--network",
contains: []string{"Avg record holders:", "Peers swept:", "Full keyspace coverage:", "Reachable peers:", "Avg region size:", "Replication factor:"},
},
{
flag: "--operations",
contains: []string{"Ongoing provides:", "Ongoing reprovides:", "Total CIDs provided:", "Total records provided:", "Total provide errors:"},
},
{
flag: "--workers",
contains: []string{"Active workers:", "Free workers:", "Workers stats:", "Periodic", "Burst"},
},
}
for _, tc := range testCases {
res := node.IPFS("provide", "stat", tc.flag)
require.NoError(t, res.Err, "flag %s should work", tc.flag)
output := res.Stdout.String()
for _, expected := range tc.contains {
assert.Contains(t, output, expected, "flag %s should contain '%s'", tc.flag, expected)
}
}
})
t.Run("multiple section flags can be combined", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.IPFS("provide", "stat", "--network", "--operations")
require.NoError(t, res.Err)
output := res.Stdout.String()
// Should have section headings when multiple flags combined
assert.Contains(t, output, "Network:")
assert.Contains(t, output, "Operations:")
assert.Contains(t, output, "Avg record holders:")
assert.Contains(t, output, "Ongoing provides:")
})
}
// TestProvideStatLegacyProvider tests Legacy provider specific behavior
func TestProvideStatLegacyProvider(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", false)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
t.Run("shows legacy stats from old provider system", func(t *testing.T) {
res := node.IPFS("provide", "stat")
require.NoError(t, res.Err)
// Legacy provider shows stats from the old reprovider system
output := res.Stdout.String()
assert.Contains(t, output, "TotalReprovides:")
assert.Contains(t, output, "AvgReprovideDuration:")
assert.Contains(t, output, "LastReprovideDuration:")
})
t.Run("rejects flags with legacy provider", func(t *testing.T) {
flags := []string{"--all", "--connectivity", "--queues", "--network", "--workers"}
for _, flag := range flags {
res := node.RunIPFS("provide", "stat", flag)
assert.Error(t, res.Err, "flag %s should be rejected for legacy provider", flag)
assert.Contains(t, res.Stderr.String(), "cannot use flags with legacy provide stats")
}
})
t.Run("rejects --lan flag with legacy provider", func(t *testing.T) {
res := node.RunIPFS("provide", "stat", "--lan")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "LAN stats only available for Sweep provider with Dual DHT")
})
}
// TestProvideStatOutputFormats tests different output formats
func TestProvideStatOutputFormats(t *testing.T) {
t.Parallel()
t.Run("JSON output with Sweep provider", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.IPFS("provide", "stat", "--enc=json")
require.NoError(t, res.Err)
// Parse JSON to verify structure
var result struct {
Sweep map[string]interface{} `json:"Sweep"`
Legacy map[string]interface{} `json:"Legacy"`
}
err := json.Unmarshal([]byte(res.Stdout.String()), &result)
require.NoError(t, err, "Output should be valid JSON")
assert.NotNil(t, result.Sweep, "Sweep stats should be present")
assert.Nil(t, result.Legacy, "Legacy stats should not be present")
})
t.Run("JSON output with Legacy provider", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", false)
node.SetIPFSConfig("Provide.Enabled", true)
node.StartDaemon()
defer node.StopDaemon()
res := node.IPFS("provide", "stat", "--enc=json")
require.NoError(t, res.Err)
// Parse JSON to verify structure
var result struct {
Sweep map[string]interface{} `json:"Sweep"`
Legacy map[string]interface{} `json:"Legacy"`
}
err := json.Unmarshal([]byte(res.Stdout.String()), &result)
require.NoError(t, err, "Output should be valid JSON")
assert.Nil(t, result.Sweep, "Sweep stats should not be present")
assert.NotNil(t, result.Legacy, "Legacy stats should be present")
})
}
// TestProvideStatIntegration tests integration with provide operations
func TestProvideStatIntegration(t *testing.T) {
t.Parallel()
t.Run("stats reflect content being added to schedule", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.SetIPFSConfig("Provide.DHT.Interval", "1h")
node.StartDaemon()
defer node.StopDaemon()
// Get initial scheduled CID count
res1 := node.IPFS("provide", "stat", "--enc=json")
require.NoError(t, res1.Err)
initialKeys := parseSweepStats(t, res1.Stdout.String()).Sweep.Schedule.Keys
// Add content - this should increase CIDs scheduled
node.IPFSAddStr("test content for stats")
// Wait for content to appear in schedule (with timeout)
// The buffered provider may take a moment to schedule items
require.Eventually(t, func() bool {
res := node.IPFS("provide", "stat", "--enc=json")
require.NoError(t, res.Err)
stats := parseSweepStats(t, res.Stdout.String())
return stats.Sweep.Schedule.Keys > initialKeys
}, provideStatEventuallyTimeout, provideStatEventuallyTick, "Content should appear in schedule after adding")
})
t.Run("stats work with all documented strategies", func(t *testing.T) {
t.Parallel()
// Test all strategies documented in docs/config.md#providestrategy
strategies := []string{"all", "pinned", "roots", "mfs", "pinned+mfs"}
for _, strategy := range strategies {
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.SetIPFSConfig("Provide.Strategy", strategy)
node.StartDaemon()
res := node.IPFS("provide", "stat")
require.NoError(t, res.Err, "stats should work with strategy %s", strategy)
output := res.Stdout.String()
assert.NotEmpty(t, output)
assert.Contains(t, output, "CIDs scheduled:")
node.StopDaemon()
}
})
}
// TestProvideStatDisabledConfig tests behavior when provide system is disabled
func TestProvideStatDisabledConfig(t *testing.T) {
t.Parallel()
t.Run("Provide.Enabled=false returns error stats not available", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", false)
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("provide", "stat")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "stats not available")
})
t.Run("Provide.Enabled=true with Provide.DHT.Interval=0 returns error stats not available", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
node.SetIPFSConfig("Provide.DHT.SweepEnabled", true)
node.SetIPFSConfig("Provide.Enabled", true)
node.SetIPFSConfig("Provide.DHT.Interval", "0")
node.StartDaemon()
defer node.StopDaemon()
res := node.RunIPFS("provide", "stat")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "stats not available")
})
}

View File

@ -3,6 +3,7 @@ package cli
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
@ -608,6 +609,124 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
})
}
// runResumeTests validates Provide.DHT.ResumeEnabled behavior for SweepingProvider.
//
// Background: The provider tracks current_time_offset = (now - cycleStart) % interval
// where cycleStart is the timestamp marking the beginning of the reprovide cycle.
// With ResumeEnabled=true, cycleStart persists in the datastore across restarts.
// With ResumeEnabled=false, cycleStart resets to 'now' on each startup.
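//
// Illustrative arithmetic using the constants defined below (interval=30s,
// initial runtime=10s, downtime=5s, restart stabilization=2s); values are
// approximate since real runs include scheduling jitter:
//
// ResumeEnabled=true:  offset after restart ~ (10s + 5s + 2s) % 30s = 17s
// ResumeEnabled=false: offset after restart ~ 2s (cycleStart reset at startup)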
func runResumeTests(t *testing.T, apply cfgApplier) {
t.Helper()
const (
reprovideInterval = 30 * time.Second
initialRuntime = 10 * time.Second // Let cycle progress
downtime = 5 * time.Second // Simulated offline period
restartTime = 2 * time.Second // Daemon restart stabilization
// Thresholds account for timing jitter (~2-3s margin)
minOffsetBeforeRestart = 8 * time.Second // Expect ~10s
minOffsetAfterResume = 12 * time.Second // Expect ~17s (10s + 5s + 2s)
maxOffsetAfterReset = 5 * time.Second // Expect ~2s (fresh start)
)
setupNode := func(t *testing.T, resumeEnabled bool) *harness.Node {
node := harness.NewT(t).NewNode().Init()
apply(node) // Sets Provide.DHT.SweepEnabled=true
node.SetIPFSConfig("Provide.DHT.ResumeEnabled", resumeEnabled)
node.SetIPFSConfig("Provide.DHT.Interval", reprovideInterval.String())
node.SetIPFSConfig("Bootstrap", []string{})
node.StartDaemon()
return node
}
t.Run("preserves cycle state across restart", func(t *testing.T) {
t.Parallel()
node := setupNode(t, true)
defer node.StopDaemon()
for i := 0; i < 10; i++ {
node.IPFSAddStr(fmt.Sprintf("resume-test-%d-%d", i, time.Now().UnixNano()))
}
time.Sleep(initialRuntime)
beforeRestart := node.IPFS("provide", "stat", "--enc=json")
offsetBeforeRestart, _, err := parseProvideStatJSON(beforeRestart.Stdout.String())
require.NoError(t, err)
require.Greater(t, offsetBeforeRestart, minOffsetBeforeRestart,
"cycle should have progressed")
node.StopDaemon()
time.Sleep(downtime)
node.StartDaemon()
time.Sleep(restartTime)
afterRestart := node.IPFS("provide", "stat", "--enc=json")
offsetAfterRestart, _, err := parseProvideStatJSON(afterRestart.Stdout.String())
require.NoError(t, err)
assert.GreaterOrEqual(t, offsetAfterRestart, minOffsetAfterResume,
"offset should account for downtime")
})
t.Run("resets cycle when disabled", func(t *testing.T) {
t.Parallel()
node := setupNode(t, false)
defer node.StopDaemon()
for i := 0; i < 10; i++ {
node.IPFSAddStr(fmt.Sprintf("no-resume-%d-%d", i, time.Now().UnixNano()))
}
time.Sleep(initialRuntime)
beforeRestart := node.IPFS("provide", "stat", "--enc=json")
offsetBeforeRestart, _, err := parseProvideStatJSON(beforeRestart.Stdout.String())
require.NoError(t, err)
require.Greater(t, offsetBeforeRestart, minOffsetBeforeRestart,
"cycle should have progressed")
node.StopDaemon()
time.Sleep(downtime)
node.StartDaemon()
time.Sleep(restartTime)
afterRestart := node.IPFS("provide", "stat", "--enc=json")
offsetAfterRestart, _, err := parseProvideStatJSON(afterRestart.Stdout.String())
require.NoError(t, err)
assert.Less(t, offsetAfterRestart, maxOffsetAfterReset,
"offset should reset to near zero")
})
}
type provideStatJSON struct {
Sweep struct {
Timing struct {
CurrentTimeOffset int64 `json:"current_time_offset"` // nanoseconds
} `json:"timing"`
Schedule struct {
NextReprovidePrefix string `json:"next_reprovide_prefix"`
} `json:"schedule"`
} `json:"Sweep"`
}
// parseProvideStatJSON extracts timing and schedule information from
// the JSON output of 'ipfs provide stat --enc=json'.
// Note: prefix is unused in current tests but kept for potential future use.
func parseProvideStatJSON(output string) (offset time.Duration, prefix string, err error) {
var stat provideStatJSON
if err := json.Unmarshal([]byte(output), &stat); err != nil {
return 0, "", err
}
offset = time.Duration(stat.Sweep.Timing.CurrentTimeOffset)
prefix = stat.Sweep.Schedule.NextReprovidePrefix
return offset, prefix, nil
}
func TestProvider(t *testing.T) {
t.Parallel()
@ -637,6 +756,11 @@ func TestProvider(t *testing.T) {
t.Run(v.name, func(t *testing.T) {
// t.Parallel()
runProviderSuite(t, v.reprovide, v.apply)
// Resume tests only apply to SweepingProvider
if v.name == "SweepingProvider" {
runResumeTests(t, v.apply)
}
})
}
}

View File

@ -0,0 +1,384 @@
package cli
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Well-known block file names in flatfs blockstore that should not be corrupted during testing.
// Flatfs stores each block as a separate .data file on disk.
const (
// emptyFileFlatfsFilename is the flatfs filename for an empty UnixFS file block
emptyFileFlatfsFilename = "CIQL7TG2PB52XIZLLHDYIUFMHUQLMMZWBNBZSLDXFCPZ5VDNQQ2WDZQ"
// emptyDirFlatfsFilename is the flatfs filename for an empty UnixFS directory block.
// This block has special handling and may be served from memory even when corrupted on disk.
emptyDirFlatfsFilename = "CIQFTFEEHEDF6KLBT32BFAGLXEZL4UWFNWM4LFTLMXQBCERZ6CMLX3Y"
)
// getEligibleFlatfsBlockFiles returns flatfs block files (*.data) that are safe to corrupt in tests.
// Filters out well-known blocks (empty file/dir) that cause test flakiness.
//
// Note: This helper is specific to the flatfs blockstore implementation where each block
// is stored as a separate file on disk under blocks/*/*.data.
func getEligibleFlatfsBlockFiles(t *testing.T, node *harness.Node) []string {
blockFiles, err := filepath.Glob(filepath.Join(node.Dir, "blocks", "*", "*.data"))
require.NoError(t, err)
require.NotEmpty(t, blockFiles, "no flatfs block files found")
var eligible []string
for _, f := range blockFiles {
name := filepath.Base(f)
if !strings.Contains(name, emptyFileFlatfsFilename) &&
!strings.Contains(name, emptyDirFlatfsFilename) {
eligible = append(eligible, f)
}
}
return eligible
}
// corruptRandomBlock corrupts a random block file in the flatfs blockstore.
// Returns the path to the corrupted file.
func corruptRandomBlock(t *testing.T, node *harness.Node) string {
eligible := getEligibleFlatfsBlockFiles(t, node)
require.NotEmpty(t, eligible, "no eligible blocks to corrupt")
toCorrupt := eligible[0]
err := os.WriteFile(toCorrupt, []byte("corrupted data"), 0644)
require.NoError(t, err)
return toCorrupt
}
// corruptMultipleBlocks corrupts multiple block files in the flatfs blockstore.
// Returns the paths to the corrupted files.
func corruptMultipleBlocks(t *testing.T, node *harness.Node, count int) []string {
eligible := getEligibleFlatfsBlockFiles(t, node)
require.GreaterOrEqual(t, len(eligible), count, "not enough eligible blocks to corrupt")
var corrupted []string
for i := 0; i < count && i < len(eligible); i++ {
err := os.WriteFile(eligible[i], []byte(fmt.Sprintf("corrupted data %d", i)), 0644)
require.NoError(t, err)
corrupted = append(corrupted, eligible[i])
}
return corrupted
}
func TestRepoVerify(t *testing.T) {
t.Run("healthy repo passes", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.IPFS("add", "-q", "--raw-leaves=false", "-r", node.IPFSBin)
res := node.IPFS("repo", "verify")
assert.Contains(t, res.Stdout.String(), "all blocks validated")
})
t.Run("detects corruption", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.IPFSAddStr("test content")
corruptRandomBlock(t, node)
res := node.RunIPFS("repo", "verify")
assert.Equal(t, 1, res.ExitCode())
assert.Contains(t, res.Stdout.String(), "was corrupt")
assert.Contains(t, res.Stderr.String(), "1 blocks corrupt")
})
t.Run("drop removes corrupt blocks", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
cid := node.IPFSAddStr("test content")
corruptRandomBlock(t, node)
res := node.RunIPFS("repo", "verify", "--drop")
assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks removed successfully")
output := res.Stdout.String()
assert.Contains(t, output, "1 blocks corrupt")
assert.Contains(t, output, "1 removed")
// Verify block is gone
res = node.RunIPFS("block", "stat", cid)
assert.NotEqual(t, 0, res.ExitCode())
})
t.Run("heal requires online mode", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.IPFSAddStr("test content")
corruptRandomBlock(t, node)
res := node.RunIPFS("repo", "verify", "--heal")
assert.NotEqual(t, 0, res.ExitCode())
assert.Contains(t, res.Stderr.String(), "online mode")
})
t.Run("heal repairs from network", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init()
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
// Add content to node 0
cid := nodes[0].IPFSAddStr("test content for healing")
// Wait for it to appear on node 1
nodes[1].IPFS("block", "get", cid)
// Corrupt on node 1
corruptRandomBlock(t, nodes[1])
// Heal should restore from node 0
res := nodes[1].RunIPFS("repo", "verify", "--heal")
assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks healed successfully")
output := res.Stdout.String()
// Should report corruption and healing with specific counts
assert.Contains(t, output, "1 blocks corrupt")
assert.Contains(t, output, "1 removed")
assert.Contains(t, output, "1 healed")
// Verify block is restored
nodes[1].IPFS("block", "stat", cid)
})
t.Run("healed blocks contain correct data", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init()
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
// Add specific content to node 0
testContent := "this is the exact content that should be healed correctly"
cid := nodes[0].IPFSAddStr(testContent)
// Fetch to node 1 and verify the content is correct initially
nodes[1].IPFS("block", "get", cid)
res := nodes[1].IPFS("cat", cid)
assert.Equal(t, testContent, res.Stdout.String())
// Corrupt on node 1
corruptRandomBlock(t, nodes[1])
// Heal the corruption
res = nodes[1].RunIPFS("repo", "verify", "--heal")
assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks healed successfully")
output := res.Stdout.String()
assert.Contains(t, output, "1 blocks corrupt")
assert.Contains(t, output, "1 removed")
assert.Contains(t, output, "1 healed")
// Verify the healed content matches the original exactly
res = nodes[1].IPFS("cat", cid)
assert.Equal(t, testContent, res.Stdout.String(), "healed content should match original")
// Also verify via block get that the raw block data is correct
block0 := nodes[0].IPFS("block", "get", cid)
block1 := nodes[1].IPFS("block", "get", cid)
assert.Equal(t, block0.Stdout.String(), block1.Stdout.String(), "raw block data should match")
})
t.Run("multiple corrupt blocks", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Create 20 blocks
for i := 0; i < 20; i++ {
node.IPFSAddStr(strings.Repeat("test content ", i+1))
}
// Corrupt 5 blocks
corruptMultipleBlocks(t, node, 5)
// Verify detects all corruptions
res := node.RunIPFS("repo", "verify")
assert.Equal(t, 1, res.ExitCode())
// Error summary is in stderr
assert.Contains(t, res.Stderr.String(), "5 blocks corrupt")
// Test with --drop
res = node.RunIPFS("repo", "verify", "--drop")
assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks removed successfully")
assert.Contains(t, res.Stdout.String(), "5 blocks corrupt")
assert.Contains(t, res.Stdout.String(), "5 removed")
})
t.Run("empty repository", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Verify empty repo passes
res := node.IPFS("repo", "verify")
assert.Equal(t, 0, res.ExitCode())
assert.Contains(t, res.Stdout.String(), "all blocks validated")
// Should work with --drop and --heal too
res = node.IPFS("repo", "verify", "--drop")
assert.Equal(t, 0, res.ExitCode())
assert.Contains(t, res.Stdout.String(), "all blocks validated")
})
t.Run("partial heal success", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init()
// Start both nodes and connect them
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
// Add 5 blocks to node 0, pin them to keep available
cid1 := nodes[0].IPFSAddStr("content available for healing 1")
cid2 := nodes[0].IPFSAddStr("content available for healing 2")
cid3 := nodes[0].IPFSAddStr("content available for healing 3")
cid4 := nodes[0].IPFSAddStr("content available for healing 4")
cid5 := nodes[0].IPFSAddStr("content available for healing 5")
// Pin these on node 0 to ensure they stay available
nodes[0].IPFS("pin", "add", cid1)
nodes[0].IPFS("pin", "add", cid2)
nodes[0].IPFS("pin", "add", cid3)
nodes[0].IPFS("pin", "add", cid4)
nodes[0].IPFS("pin", "add", cid5)
// Node 1 fetches these blocks
nodes[1].IPFS("block", "get", cid1)
nodes[1].IPFS("block", "get", cid2)
nodes[1].IPFS("block", "get", cid3)
nodes[1].IPFS("block", "get", cid4)
nodes[1].IPFS("block", "get", cid5)
// Now remove some blocks from node 0 to simulate partial availability
nodes[0].IPFS("pin", "rm", cid3)
nodes[0].IPFS("pin", "rm", cid4)
nodes[0].IPFS("pin", "rm", cid5)
nodes[0].IPFS("repo", "gc")
// Verify node 1 is still connected
peers := nodes[1].IPFS("swarm", "peers")
require.Contains(t, peers.Stdout.String(), nodes[0].PeerID().String())
// Corrupt 5 blocks on node 1
corruptMultipleBlocks(t, nodes[1], 5)
// Heal should partially succeed (only cid1 and cid2 available from node 0)
res := nodes[1].RunIPFS("repo", "verify", "--heal")
assert.Equal(t, 1, res.ExitCode())
// Should show mixed results with specific counts in stderr
errOutput := res.Stderr.String()
assert.Contains(t, errOutput, "5 blocks corrupt")
assert.Contains(t, errOutput, "5 removed")
// Only cid1 and cid2 are available for healing, cid3-5 were GC'd
assert.Contains(t, errOutput, "2 healed")
assert.Contains(t, errOutput, "3 failed to heal")
})
t.Run("heal with block not available on network", func(t *testing.T) {
t.Parallel()
nodes := harness.NewT(t).NewNodes(2).Init()
// Start both nodes and connect
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
// Add unique content only to node 1
nodes[1].IPFSAddStr("unique content that exists nowhere else")
// Ensure nodes are connected
peers := nodes[1].IPFS("swarm", "peers")
require.Contains(t, peers.Stdout.String(), nodes[0].PeerID().String())
// Corrupt the block on node 1
corruptRandomBlock(t, nodes[1])
// Heal should fail - node 0 doesn't have this content
res := nodes[1].RunIPFS("repo", "verify", "--heal")
assert.Equal(t, 1, res.ExitCode())
// Should report heal failure with specific counts in stderr
errOutput := res.Stderr.String()
assert.Contains(t, errOutput, "1 blocks corrupt")
assert.Contains(t, errOutput, "1 removed")
assert.Contains(t, errOutput, "1 failed to heal")
})
t.Run("large repository scale test", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Create 1000 small blocks
for i := 0; i < 1000; i++ {
node.IPFSAddStr(fmt.Sprintf("content-%d", i))
}
// Corrupt 10 blocks
corruptMultipleBlocks(t, node, 10)
// Verify handles large repos efficiently
res := node.RunIPFS("repo", "verify")
assert.Equal(t, 1, res.ExitCode())
// Should report exactly 10 corrupt blocks in stderr
assert.Contains(t, res.Stderr.String(), "10 blocks corrupt")
// Test --drop at scale
res = node.RunIPFS("repo", "verify", "--drop")
assert.Equal(t, 0, res.ExitCode(), "should exit 0 when all corrupt blocks removed successfully")
output := res.Stdout.String()
assert.Contains(t, output, "10 blocks corrupt")
assert.Contains(t, output, "10 removed")
})
t.Run("drop with partial removal failures", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Create several blocks
for i := 0; i < 5; i++ {
node.IPFSAddStr(fmt.Sprintf("content for removal test %d", i))
}
// Corrupt 3 blocks
corruptedFiles := corruptMultipleBlocks(t, node, 3)
require.Len(t, corruptedFiles, 3)
// Make one of the corrupted files read-only to simulate removal failure
err := os.Chmod(corruptedFiles[0], 0400) // read-only
require.NoError(t, err)
defer func() { _ = os.Chmod(corruptedFiles[0], 0644) }() // cleanup
// Also make the directory read-only to prevent deletion
blockDir := filepath.Dir(corruptedFiles[0])
originalPerm, err := os.Stat(blockDir)
require.NoError(t, err)
err = os.Chmod(blockDir, 0500) // read+execute only, no write
require.NoError(t, err)
defer func() { _ = os.Chmod(blockDir, originalPerm.Mode()) }() // cleanup
// Try to drop - should fail because at least one block can't be removed
res := node.RunIPFS("repo", "verify", "--drop")
assert.Equal(t, 1, res.ExitCode(), "should exit 1 when some blocks fail to remove")
// Restore permissions for verification
_ = os.Chmod(blockDir, originalPerm.Mode())
_ = os.Chmod(corruptedFiles[0], 0644)
// Should report both successes and failures with specific counts
errOutput := res.Stderr.String()
assert.Contains(t, errOutput, "3 blocks corrupt")
assert.Contains(t, errOutput, "2 removed")
assert.Contains(t, errOutput, "1 failed to remove")
})
}

View File

@ -2,7 +2,10 @@ package cli
import (
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
@ -10,6 +13,33 @@ import (
"github.com/stretchr/testify/require"
)
func waitUntilProvidesComplete(t *testing.T, n *harness.Node) {
getCidsCount := func(line string) int {
trimmed := strings.TrimSpace(line)
countStr := strings.SplitN(trimmed, " ", 2)[0]
count, err := strconv.Atoi(countStr)
require.NoError(t, err)
return count
}
queuedProvides, ongoingProvides := true, true
for queuedProvides || ongoingProvides {
res := n.IPFS("provide", "stat", "-a")
require.NoError(t, res.Err)
for _, line := range res.Stdout.Lines() {
if trimmed, ok := strings.CutPrefix(line, " Provide queue:"); ok {
provideQueueSize := getCidsCount(trimmed)
queuedProvides = provideQueueSize > 0
}
if trimmed, ok := strings.CutPrefix(line, " Ongoing provides:"); ok {
ongoingProvideCount := getCidsCount(trimmed)
ongoingProvides = ongoingProvideCount > 0
}
}
time.Sleep(10 * time.Millisecond)
}
}
func testRoutingDHT(t *testing.T, enablePubsub bool) {
t.Run(fmt.Sprintf("enablePubSub=%v", enablePubsub), func(t *testing.T) {
t.Parallel()
@ -84,10 +114,8 @@ func testRoutingDHT(t *testing.T, enablePubsub bool) {
t.Run("ipfs routing findprovs", func(t *testing.T) {
t.Parallel()
hash := nodes[3].IPFSAddStr("some stuff")
// Reprovide as initialProviderDelay still ongoing
res := nodes[3].IPFS("routing", "reprovide")
require.NoError(t, res.Err)
res = nodes[4].IPFS("routing", "findprovs", hash)
waitUntilProvidesComplete(t, nodes[3])
res := nodes[4].IPFS("routing", "findprovs", hash)
assert.Equal(t, nodes[3].PeerID().String(), res.Stdout.Trimmed())
})

View File

@ -159,4 +159,127 @@ func TestRPCAuth(t *testing.T) {
node.StopDaemon()
})
t.Run("Requests without Authorization header are rejected when auth is enabled", func(t *testing.T) {
t.Parallel()
node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{
"userA": {
AuthSecret: "bearer:mytoken",
AllowedPaths: []string{"/api/v0"},
},
})
// Create client with NO auth
apiClient := node.APIClient() // Uses http.DefaultClient with no auth headers
// Should be denied without auth header
resp := apiClient.Post("/api/v0/id", nil)
assert.Equal(t, 403, resp.StatusCode)
// Should contain denial message
assert.Contains(t, resp.Body, rpcDeniedMsg)
node.StopDaemon()
})
t.Run("Version endpoint is always accessible even with limited AllowedPaths", func(t *testing.T) {
t.Parallel()
node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{
"userA": {
AuthSecret: "bearer:mytoken",
AllowedPaths: []string{"/api/v0/id"}, // Only /id allowed
},
})
apiClient := node.APIClient()
apiClient.Client = &http.Client{
Transport: auth.NewAuthorizedRoundTripper("Bearer mytoken", http.DefaultTransport),
}
// Can access /version even though not in AllowedPaths
resp := apiClient.Post("/api/v0/version", nil)
assert.Equal(t, 200, resp.StatusCode)
node.StopDaemon()
})
t.Run("User cannot access API with another user's secret", func(t *testing.T) {
t.Parallel()
node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{
"alice": {
AuthSecret: "bearer:alice-secret",
AllowedPaths: []string{"/api/v0/id"},
},
"bob": {
AuthSecret: "bearer:bob-secret",
AllowedPaths: []string{"/api/v0/config"},
},
})
// Alice tries to use Bob's secret
apiClient := node.APIClient()
apiClient.Client = &http.Client{
Transport: auth.NewAuthorizedRoundTripper("Bearer bob-secret", http.DefaultTransport),
}
// Bob's secret should work for Bob's paths
resp := apiClient.Post("/api/v0/config/show", nil)
assert.Equal(t, 200, resp.StatusCode)
// But not for Alice's paths (Bob doesn't have access to /id)
resp = apiClient.Post("/api/v0/id", nil)
assert.Equal(t, 403, resp.StatusCode)
node.StopDaemon()
})
t.Run("Empty AllowedPaths denies all access except version", func(t *testing.T) {
t.Parallel()
node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{
"userA": {
AuthSecret: "bearer:mytoken",
AllowedPaths: []string{}, // Empty!
},
})
apiClient := node.APIClient()
apiClient.Client = &http.Client{
Transport: auth.NewAuthorizedRoundTripper("Bearer mytoken", http.DefaultTransport),
}
// Should deny everything
resp := apiClient.Post("/api/v0/id", nil)
assert.Equal(t, 403, resp.StatusCode)
resp = apiClient.Post("/api/v0/config/show", nil)
assert.Equal(t, 403, resp.StatusCode)
// Except version
resp = apiClient.Post("/api/v0/version", nil)
assert.Equal(t, 200, resp.StatusCode)
node.StopDaemon()
})
t.Run("CLI commands fail without --api-auth when auth is enabled", func(t *testing.T) {
t.Parallel()
node := makeAndStartProtectedNode(t, map[string]*config.RPCAuthScope{
"userA": {
AuthSecret: "bearer:mytoken",
AllowedPaths: []string{"/api/v0"},
},
})
// Try to run command without --api-auth flag
resp := node.RunIPFS("id") // No --api-auth flag
require.Error(t, resp.Err)
require.Contains(t, resp.Stderr.String(), rpcDeniedMsg)
node.StopDaemon()
})
}

View File

@ -205,6 +205,9 @@ func TestTelemetry(t *testing.T) {
"repo_size_bucket",
"uptime_bucket",
"reprovider_strategy",
"provide_dht_sweep_enabled",
"provide_dht_interval_custom",
"provide_dht_max_workers_custom",
"routing_type",
"routing_accelerated_dht_client",
"routing_delegated_count",

View File

@ -8,7 +8,7 @@ require (
github.com/Kubuxu/gocovmerge v0.0.0-20161216165753-7ecaa51963cd
github.com/golangci/golangci-lint v1.64.8
github.com/ipfs/go-cidutil v0.1.0
github.com/ipfs/go-log/v2 v2.8.2
github.com/ipfs/go-log/v2 v2.9.0
github.com/ipfs/go-test v0.2.3
github.com/ipfs/hang-fds v0.1.0
github.com/ipfs/iptb v1.4.1
@ -65,7 +65,7 @@ require (
github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble/v2 v2.1.0 // indirect
github.com/cockroachdb/pebble/v2 v2.1.2 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
@ -92,7 +92,7 @@ require (
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.1.0 // indirect
github.com/gammazero/deque v1.2.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/ghostiam/protogetter v0.3.9 // indirect
github.com/go-critic/go-critic v0.12.0 // indirect
@ -136,12 +136,12 @@ require (
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/boxo v0.35.1 // indirect
github.com/ipfs/boxo v0.35.2 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-block-format v0.2.3 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-cid v0.6.0 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-dsqueue v0.1.0 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
@ -182,9 +182,9 @@ require (
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-doh-resolver v0.5.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p v0.43.0 // indirect
github.com/libp2p/go-libp2p v0.45.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.35.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.36.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
@ -258,9 +258,7 @@ require (
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.55.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/raeperd/recvcheck v0.2.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
@ -329,15 +327,15 @@ require (
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect

View File

@ -118,8 +118,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.0 h1:6KZvjSpWcEXZUvlLzTRC7T1A2G7r+bFskIzggklxixo=
github.com/cockroachdb/pebble/v2 v2.1.0/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
@ -189,8 +189,8 @@ github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIp
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU=
github.com/gammazero/deque v1.2.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
@ -334,22 +334,24 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.1 h1:MGL3aaaxnu/h9KKq+X/6FxapI/qlDmnRNk33U7tz/fQ=
github.com/ipfs/boxo v0.35.1/go.mod h1:/p1XZVp+Yzv78RuKjb3BESBYEQglRgDrWvmN5mFrsus=
github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8=
github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30=
github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ=
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-dsqueue v0.1.0 h1:OrahKDtT/Q+iMgKaM9XWdxrYPVASFpTuLah8QpKjboc=
github.com/ipfs/go-dsqueue v0.1.0/go.mod h1:iLNkodSOSKTLn0gCvL9ikArz5rZfNh8F9/BRvHe7RbY=
github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp0x0=
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
@ -364,8 +366,8 @@ github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rA
github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk=
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU=
github.com/ipfs/go-log/v2 v2.8.2 h1:nVG4nNHUwwI/sTs9Bi5iE8sXFQwXs3AjkkuWhg7+Y2I=
github.com/ipfs/go-log/v2 v2.8.2/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk=
github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU=
github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY=
github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU=
@ -458,12 +460,12 @@ github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+s
github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU=
github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc=
github.com/libp2p/go-libp2p v0.45.0 h1:Pdhr2HsFXaYjtfiNcBP4CcRUONvbMFdH3puM9vV4Tiw=
github.com/libp2p/go-libp2p v0.45.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-kad-dht v0.35.1 h1:RQglhc9OxqDwlFFdhQMwKxIPBIBfGsleROnK5hqVsoE=
github.com/libp2p/go-libp2p-kad-dht v0.35.1/go.mod h1:1oCXzkkBiYh3d5cMWLpInSOZ6am2AlpC4G+GDcZFcE0=
github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8=
github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
@ -490,6 +492,8 @@ github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s
github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04=
github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/matoous/godox v1.1.0 h1:W5mqwbyWrwZv6OQ5Z1a/DHGMOvXYCBP3+Ht7KMoJhq4=
@ -782,6 +786,8 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8=
github.com/tdakkota/asciicheck v0.4.1/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8=
@ -909,11 +915,11 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA=
golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4=
@ -963,8 +969,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -983,8 +989,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1014,8 +1020,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -1029,8 +1035,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@ -1044,8 +1050,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=

View File

@ -93,8 +93,8 @@ EOF
test_cmp expected actual
'
test_expect_failure "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" '
ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets
test_expect_success "'ipfs add' with an unregistered hash and wrapped leaves fails without crashing" '
test_expect_code 1 ipfs add --hash poseidon-bls12_381-a2-fc1 --raw-leaves=false -r mountdir/planets
'
}

View File

@ -3,6 +3,9 @@
# Copyright (c) 2016 Jeromy Johnson
# MIT Licensed; see the LICENSE file in this repository.
#
# NOTE: This is a legacy sharness test kept for compatibility.
# New tests for 'ipfs repo verify' should be added to test/cli/repo_verify_test.go
#
test_description="Test ipfs repo fsck"

View File

@ -250,6 +250,5 @@ process_resident_memory_bytes
process_start_time_seconds
process_virtual_memory_bytes
process_virtual_memory_max_bytes
provider_reprovider_provide_count
provider_reprovider_reprovide_count
provider_provides_total
target_info

View File

@ -11,7 +11,7 @@ import (
var CurrentCommit string
// CurrentVersionNumber is the current application's version literal.
const CurrentVersionNumber = "0.38.2"
const CurrentVersionNumber = "0.39.0"
const ApiVersion = "/kubo/" + CurrentVersionNumber + "/" //nolint