Merge remote-tracking branch 'origin/master' into feat/ipip-0524-codec-conversion-config

# Conflicts:
#	.github/workflows/gateway-conformance.yml
#	docs/changelogs/v0.40.md
#	docs/examples/kubo-as-a-library/go.mod
#	docs/examples/kubo-as-a-library/go.sum
#	go.mod
#	go.sum
#	test/dependencies/go.mod
#	test/dependencies/go.sum
Marcin Rataj 2026-02-05 19:41:33 +01:00
commit d749ea4a25
139 changed files with 9724 additions and 2773 deletions

.github/dependabot.yml

@@ -1,6 +1,45 @@
# Dependabot PRs are auto-tidied by .github/workflows/dependabot-tidy.yml
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "monthly"
open-pull-requests-limit: 10
labels:
- "dependencies"
ignore:
# Updated via go-ds-* wrappers in ipfs-ecosystem group
- dependency-name: "github.com/cockroachdb/pebble*"
- dependency-name: "github.com/syndtr/goleveldb"
- dependency-name: "github.com/dgraph-io/badger*"
groups:
ipfs-ecosystem:
patterns:
- "github.com/ipfs/*"
- "github.com/ipfs-shipyard/*"
- "github.com/ipshipyard/*"
- "github.com/multiformats/*"
- "github.com/ipld/*"
libp2p-ecosystem:
patterns:
- "github.com/libp2p/*"
golang-x:
patterns:
- "golang.org/x/*"
opentelemetry:
patterns:
- "go.opentelemetry.io/*"
prometheus:
patterns:
- "github.com/prometheus/*"
- "contrib.go.opencensus.io/*"
- "go.opencensus.io"
uber:
patterns:
- "go.uber.org/*"

.github/workflows/dependabot-tidy.yml (new file)

@@ -0,0 +1,61 @@
# Dependabot only updates go.mod/go.sum in the root module, but this repo has
# multiple Go modules (see docs/examples/). This workflow runs `make mod_tidy`
# on Dependabot PRs to keep all go.sum files in sync, preventing go-check CI
# failures.
name: Dependabot Tidy
on:
pull_request_target:
types: [opened, synchronize]
workflow_dispatch:
inputs:
pr_number:
description: 'PR number to run mod_tidy on'
required: true
type: number
permissions:
contents: write
pull-requests: write
jobs:
tidy:
if: github.actor == 'dependabot[bot]' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
steps:
- name: Get PR info
id: pr
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
pr_number="${{ inputs.pr_number }}"
else
pr_number="${{ github.event.pull_request.number }}"
fi
echo "number=$pr_number" >> $GITHUB_OUTPUT
branch=$(gh pr view "$pr_number" --repo "${{ github.repository }}" --json headRefName -q '.headRefName')
echo "branch=$branch" >> $GITHUB_OUTPUT
- uses: actions/checkout@v6
with:
ref: ${{ steps.pr.outputs.branch }}
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
- name: Run make mod_tidy
run: make mod_tidy
- name: Check for changes
id: git-check
run: |
if [[ -n $(git status --porcelain) ]]; then
echo "modified=true" >> $GITHUB_OUTPUT
fi
- name: Commit changes
if: steps.git-check.outputs.modified == 'true'
run: |
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git add -A
git commit -m "chore: run make mod_tidy"
git push
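
For reference, the `workflow_dispatch` trigger above can also be invoked by hand with the GitHub CLI; the PR number below is a placeholder:

```console
$ # manually re-run mod_tidy on a specific Dependabot PR (hypothetical PR number)
$ gh workflow run dependabot-tidy.yml --repo ipfs/kubo -f pr_number=1234
```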

.github/workflows/gateway-conformance.yml

@@ -41,7 +41,7 @@ jobs:
steps:
# 1. Download the gateway-conformance fixtures
- name: Download gateway-conformance fixtures
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@376504c31aae5e2d47c23cb1e131a7573a7e3a7f # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@e17586f4cccdd0f93f19a68b66a25e07e03731f6 # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
with:
output: fixtures
@@ -93,7 +93,7 @@ jobs:
# 6. Run the gateway-conformance tests
- name: Run gateway-conformance tests
uses: ipfs/gateway-conformance/.github/actions/test@376504c31aae5e2d47c23cb1e131a7573a7e3a7f # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
uses: ipfs/gateway-conformance/.github/actions/test@e17586f4cccdd0f93f19a68b66a25e07e03731f6 # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
with:
gateway-url: http://127.0.0.1:8080
subdomain-url: http://localhost:8080
@@ -109,13 +109,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance.json
path: output.json
@@ -127,7 +127,7 @@ jobs:
steps:
# 1. Download the gateway-conformance fixtures
- name: Download gateway-conformance fixtures
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@376504c31aae5e2d47c23cb1e131a7573a7e3a7f # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@e17586f4cccdd0f93f19a68b66a25e07e03731f6 # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
with:
output: fixtures
@@ -199,7 +199,7 @@ jobs:
# 9. Run the gateway-conformance tests over libp2p
- name: Run gateway-conformance tests over libp2p
uses: ipfs/gateway-conformance/.github/actions/test@376504c31aae5e2d47c23cb1e131a7573a7e3a7f # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
uses: ipfs/gateway-conformance/.github/actions/test@e17586f4cccdd0f93f19a68b66a25e07e03731f6 # TODO: switch to release tag once https://github.com/ipfs/gateway-conformance/pull/254 ships
with:
gateway-url: http://127.0.0.1:8092
args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length'
@@ -214,13 +214,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance-libp2p.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: gateway-conformance-libp2p.json
path: output.json


@@ -14,11 +14,13 @@ concurrency:
cancel-in-progress: true
jobs:
go-test:
# Unit tests with coverage collection (uploaded to Codecov)
unit-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 20
timeout-minutes: 15
env:
GOTRACEBACK: single # reduce noise on test timeout panics
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
@@ -36,41 +38,18 @@ jobs:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
env:
# increasing parallelism beyond 2 doesn't speed up the tests much
PARALLEL: 2
- name: Run unit tests
run: |
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
make test_unit &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
if: failure() || success()
with:
name: unittests
files: coverage/unit_tests.coverprofile
- name: Test kubo-as-a-library example
run: |
# we want to first test with the kubo version in the go.mod file
go test -v ./...
# we also want to test the examples against the current version of kubo
# however, that version might be in a fork so we need to replace the dependency
# backup the go.mod and go.sum files to restore them after we run the tests
cp go.mod go.mod.bak
cp go.sum go.sum.bak
# make sure the examples run against the current version of kubo
go mod edit -replace github.com/ipfs/kubo=./../../..
go mod tidy
go test -v ./...
# restore the go.mod and go.sum files to their original state
mv go.mod.bak go.mod
mv go.sum.bak go.sum
working-directory: docs/examples/kubo-as-a-library
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Create a proper JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
@@ -78,9 +57,9 @@ jobs:
output: test/unit/gotest.junit.xml
if: failure() || success()
- name: Archive the JUnit XML report
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: unit
name: unit-tests-junit
path: test/unit/gotest.junit.xml
if: failure() || success()
- name: Create a HTML report
@@ -91,9 +70,9 @@ jobs:
output: test/unit/gotest.html
if: failure() || success()
- name: Archive the HTML report
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: html
name: unit-tests-html
path: test/unit/gotest.html
if: failure() || success()
- name: Create a Markdown report
@@ -106,3 +85,86 @@ jobs:
- name: Set the summary
run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()
# End-to-end integration/regression tests from test/cli
# (Go-based replacement for legacy test/sharness shell scripts)
cli-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 15
env:
GOTRACEBACK: single # reduce noise on test timeout panics
TEST_VERBOSE: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: Run CLI tests
env:
IPFS_PATH: ${{ runner.temp }}/ipfs-test
run: make test_cli
- name: Create JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
input: test/cli/cli-tests.json
output: test/cli/cli-tests.junit.xml
if: failure() || success()
- name: Archive JUnit XML report
uses: actions/upload-artifact@v6
with:
name: cli-tests-junit
path: test/cli/cli-tests.junit.xml
if: failure() || success()
- name: Create HTML report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: no-frames
input: test/cli/cli-tests.junit.xml
output: test/cli/cli-tests.html
if: failure() || success()
- name: Archive HTML report
uses: actions/upload-artifact@v6
with:
name: cli-tests-html
path: test/cli/cli-tests.html
if: failure() || success()
- name: Create Markdown report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: summary
input: test/cli/cli-tests.junit.xml
output: test/cli/cli-tests.md
if: failure() || success()
- name: Set summary
run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()
# Example tests (kubo-as-a-library)
example-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 5
env:
GOTRACEBACK: single
defaults:
run:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Run example tests
run: make test_examples


@@ -1,3 +1,17 @@
# Interoperability Tests
#
# This workflow ensures Kubo remains compatible with the broader IPFS ecosystem.
# It builds Kubo from source, then runs:
#
# 1. helia-interop: Tests compatibility with Helia (JavaScript IPFS implementation)
# using Playwright-based tests from @helia/interop package.
#
# 2. ipfs-webui: Runs E2E tests from ipfs/ipfs-webui repository to verify
# the web interface works correctly with the locally built Kubo binary.
#
# Both jobs use caching to speed up repeated runs (npm dependencies, Playwright
# browsers, and webui build artifacts).
name: Interop
on:
@@ -37,7 +51,7 @@ jobs:
with:
go-version-file: 'go.mod'
- run: make build
- uses: actions/upload-artifact@v5
- uses: actions/upload-artifact@v6
with:
name: kubo
path: cmd/ipfs/ipfs
@@ -52,23 +66,47 @@ jobs:
- uses: actions/setup-node@v6
with:
node-version: lts/*
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v7
with:
name: kubo
path: cmd/ipfs
- run: chmod +x cmd/ipfs/ipfs
- run: echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
id: npm-cache-dir
- uses: actions/cache@v4
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }}
restore-keys: ${{ runner.os }}-${{ github.job }}-helia-
- run: sudo apt update
- run: sudo apt install -y libxkbcommon0 libxdamage1 libgbm1 libpango-1.0-0 libcairo2 # dependencies for playwright
- run: npx --package @helia/interop helia-interop
# Cache node_modules based on latest @helia/interop version from npm registry.
# This ensures we always test against the latest release while still benefiting
# from caching when the version hasn't changed.
- name: Get latest @helia/interop version
id: helia-version
run: echo "version=$(npm view @helia/interop version)" >> $GITHUB_OUTPUT
- name: Cache helia-interop node_modules
uses: actions/cache@v5
id: helia-cache
with:
path: node_modules
key: ${{ runner.os }}-helia-interop-${{ steps.helia-version.outputs.version }}
- name: Install @helia/interop
if: steps.helia-cache.outputs.cache-hit != 'true'
run: npm install @helia/interop
# TODO(IPIP-499): Remove --grep --invert workaround once helia implements IPIP-499
# Tracking issue: https://github.com/ipfs/helia/issues/941
#
# PROVISIONAL HACK: Skip '@helia/mfs - should have the same CID after
# creating a file' test due to IPIP-499 changes in kubo.
#
# WHY IT FAILS: The test creates a 5-byte file in MFS on both kubo and helia,
# then compares the root directory CID. With kubo PR #11148, `ipfs files write`
# now produces raw CIDs for single-block files (matching `ipfs add --raw-leaves`),
# while helia uses `reduceSingleLeafToSelf: false` which keeps the dag-pb wrapper.
# Different file CIDs lead to different directory CIDs.
#
# We run aegir directly (instead of helia-interop binary) because only aegir
# supports the --grep/--invert flags needed to exclude specific tests.
- name: Run helia-interop tests (excluding IPIP-499 incompatible test)
run: npx aegir test -t node --bail -- --grep 'should have the same CID after creating a file' --invert
env:
KUBO_BINARY: ${{ github.workspace }}/cmd/ipfs/ipfs
working-directory: node_modules/@helia/interop
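
For context, a rough sketch (not part of the workflow) of how the divergence described in the comments above shows up on the Kubo side; the MFS file name is illustrative:

```console
$ echo -n "hello" | ipfs files write --create /interop-check.txt
$ ipfs files stat --hash /interop-check.txt  # raw-leaf CID, matching `ipfs add --raw-leaves` per PR #11148
$ ipfs files stat --hash /                   # MFS root CID, the value the Helia test compares
```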
ipfs-webui:
needs: [interop-prep]
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
@@ -84,10 +122,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/setup-node@v6
with:
node-version: 20.x
- uses: actions/download-artifact@v6
- uses: actions/download-artifact@v7
with:
name: kubo
path: cmd/ipfs
@@ -96,36 +131,73 @@ jobs:
with:
repository: ipfs/ipfs-webui
path: ipfs-webui
- run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
id: npm-cache-dir
- uses: actions/cache@v4
- uses: actions/setup-node@v6
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-${{ github.job }}-
- env:
NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }}
run: |
npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR"
npx playwright install --with-deps
working-directory: ipfs-webui
- id: ref
node-version-file: 'ipfs-webui/.tool-versions'
- id: webui-ref
run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT
working-directory: ipfs-webui
- id: state
- id: webui-state
env:
GITHUB_TOKEN: ${{ github.token }}
ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.ref.outputs.ref }}/status
ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.webui-ref.outputs.ref }}/status
SELECTOR: .state
KEY: state
run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "$KEY={}" | tee -a $GITHUB_OUTPUT
- name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }})
# Cache node_modules based on package-lock.json
- name: Cache node_modules
uses: actions/cache@v5
id: node-modules-cache
with:
path: ipfs-webui/node_modules
key: ${{ runner.os }}-webui-node-modules-${{ hashFiles('ipfs-webui/package-lock.json') }}
restore-keys: |
${{ runner.os }}-webui-node-modules-
- name: Install dependencies
if: steps.node-modules-cache.outputs.cache-hit != 'true'
run: npm ci --prefer-offline --no-audit --progress=false
working-directory: ipfs-webui
# Cache Playwright browsers
- name: Cache Playwright browsers
uses: actions/cache@v5
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: ${{ runner.os }}-playwright-${{ hashFiles('ipfs-webui/package-lock.json') }}
restore-keys: |
${{ runner.os }}-playwright-
# On cache miss: download browsers and install OS dependencies
- name: Install Playwright with dependencies
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: npx playwright install --with-deps
working-directory: ipfs-webui
# On cache hit: only ensure OS dependencies are present (fast, idempotent)
- name: Install Playwright OS dependencies
if: steps.playwright-cache.outputs.cache-hit == 'true'
run: npx playwright install-deps
working-directory: ipfs-webui
# Cache test build output
- name: Cache test build
uses: actions/cache@v5
id: test-build-cache
with:
path: ipfs-webui/build
key: ${{ runner.os }}-webui-build-${{ hashFiles('ipfs-webui/package-lock.json', 'ipfs-webui/src/**', 'ipfs-webui/public/**') }}
restore-keys: |
${{ runner.os }}-webui-build-
- name: Build ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }})
if: steps.test-build-cache.outputs.cache-hit != 'true'
run: npm run test:build
working-directory: ipfs-webui
- name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary
- name: Test ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }}) E2E against the locally built Kubo binary
run: npm run test:e2e
env:
IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs
working-directory: ipfs-webui
- name: Upload test artifacts on failure
if: failure()
uses: actions/upload-artifact@v6
with:
name: webui-test-results
path: ipfs-webui/test-results/
retention-days: 7


@@ -32,7 +32,7 @@ jobs:
go-version-file: 'kubo/go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
- uses: actions/cache@v4
- uses: actions/cache@v5
with:
path: test/sharness/lib/dependencies
key: ${{ runner.os }}-test-generate-junit-html-${{ hashFiles('test/sharness/lib/test-generate-junit-html.sh') }}
@@ -55,11 +55,13 @@ jobs:
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
if: failure() || success()
with:
name: sharness
files: kubo/coverage/sharness_tests.coverprofile
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Aggregate results
run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt
- name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column
@@ -88,7 +90,7 @@ jobs:
destination: sharness.html
- name: Upload one-page HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: sharness.html
path: kubo/test/sharness/test-results/sharness.html
@@ -108,7 +110,7 @@ jobs:
destination: sharness-html/
- name: Upload full HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: sharness-html
path: kubo/test/sharness/test-results/sharness-html


@@ -77,7 +77,7 @@ jobs:
- name: Upload test results
if: always()
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: ${{ matrix.os }}-test-results
path: |

.gitignore

@@ -28,6 +28,11 @@ go-ipfs-source.tar.gz
docs/examples/go-ipfs-as-a-library/example-folder/Qm*
/test/sharness/t0054-dag-car-import-export-data/*.car
# test artifacts from make test_unit / test_cli
/test/unit/gotest.json
/test/unit/gotest.junit.xml
/test/cli/cli-tests.json
# ignore build output from snapcraft
/ipfs_*.snap
/parts

CONTRIBUTING.md

@@ -1,6 +1,10 @@
IPFS as a project, including go-ipfs and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
# Contributing to Kubo
We also adhere to the [GO IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information of how to collaborate and contribute in the Go implementation of IPFS.
**For development setup, building, and testing, see the [Developer Guide](docs/developer-guide.md).**
IPFS as a project, including Kubo and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
We also adhere to the [Go IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information on how to collaborate and contribute to the Go implementation of IPFS.
We appreciate your time and attention for going over these. Please open an issue on ipfs/community if you have any questions.

README.md

@@ -2,7 +2,7 @@
<br>
<a href="https://github.com/ipfs/kubo/blob/master/docs/logo/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
<br>
Kubo: IPFS Implementation in GO
Kubo: IPFS Implementation in Go
<br>
</h1>
@@ -11,111 +11,61 @@
<p align="center">
<a href="https://ipfs.tech"><img src="https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square" alt="Official Part of IPFS Project"></a>
<a href="https://discuss.ipfs.tech"><img alt="Discourse Forum" src="https://img.shields.io/discourse/posts?server=https%3A%2F%2Fdiscuss.ipfs.tech"></a>
<a href="https://matrix.to/#/#ipfs-space:ipfs.io"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://docs.ipfs.tech/community/"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/gobuild.yml?branch=master"></a>
<a href="https://github.com/ipfs/kubo/releases"><img alt="GitHub release" src="https://img.shields.io/github/v/release/ipfs/kubo?filter=!*rc*"></a>
</p>
<hr />
<p align="center">
<b><a href="#what-is-kubo">What is Kubo?</a></b> | <b><a href="#quick-taste">Quick Taste</a></b> | <b><a href="#install">Install</a></b> | <b><a href="#documentation">Documentation</a></b> | <b><a href="#development">Development</a></b> | <b><a href="#getting-help">Getting Help</a></b>
</p>
## What is Kubo?
Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the standard for content-addressing on the Web, interoperable with HTTP. Thus powered by future-proof data models and the libp2p for network communication. Kubo is written in Go.
Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) implementation and is the [most widely used one today](https://probelab.io/ipfs/topology/#chart-agent-types-avg). It takes an opinionated approach to content-addressing ([CIDs](https://docs.ipfs.tech/concepts/glossary/#cid), [DAGs](https://docs.ipfs.tech/concepts/glossary/#dag)) that maximizes interoperability: [UnixFS](https://docs.ipfs.tech/concepts/glossary/#unixfs) for files and directories, [HTTP Gateways](https://docs.ipfs.tech/concepts/glossary/#gateway) for web browsers, [Bitswap](https://docs.ipfs.tech/concepts/glossary/#bitswap) and [HTTP](https://specs.ipfs.tech/http-gateways/trustless-gateway/) for verifiable data transfer.
Featureset
- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT
- Native support for UnixFS (most popular way to represent files and directories on IPFS)
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups
- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node
- [Content blocking](/docs/content-blocking.md) support for operators of public nodes
**Features:**
### Other implementations
- Runs an IPFS node as a network service (LAN [mDNS](https://github.com/libp2p/specs/blob/master/discovery/mdns.md) and WAN [Amino DHT](https://docs.ipfs.tech/concepts/glossary/#dht))
- [Command-line interface](https://docs.ipfs.tech/reference/kubo/cli/) (`ipfs --help`)
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) for node management
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md)
- [Content blocking](./docs/content-blocking.md) for public node operators
See [List](https://docs.ipfs.tech/basics/ipfs-implementations/)
**Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/)
## What is IPFS?
## Quick Taste
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from previous systems such as Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single BitTorrent swarm, exchanging git objects. IPFS provides an interface as simple as the HTTP web, but with permanence built-in. You can also mount the world at /ipfs.
After [installing Kubo](#install), verify it works:
For more info see: https://docs.ipfs.tech/concepts/what-is-ipfs/
```console
$ ipfs init
generating ED25519 keypair...done
peer identity: 12D3KooWGcSLQdLDBi2BvoP8WnpdHvhWPbxpGcqkf93rL2XMZK7R
Before opening an issue, consider using one of the following locations to ensure you are opening your thread in the right place:
- kubo (previously named go-ipfs) _implementation_ bugs in [this repo](https://github.com/ipfs/kubo/issues).
- Documentation issues in [ipfs/docs issues](https://github.com/ipfs/ipfs-docs/issues).
- IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues).
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech).
- Or [chat with us](https://docs.ipfs.tech/community/chat/).
$ ipfs daemon &
Daemon is ready
[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCdjsUXJ3QawK4O5L1kqqsew?label=Subscribe%20IPFS&style=social&cacheSeconds=3600)](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [![Follow @IPFS on Twitter](https://img.shields.io/twitter/follow/IPFS?style=social&cacheSeconds=3600)](https://twitter.com/IPFS)
$ echo "hello IPFS" | ipfs add -q --cid-version 1
bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
## Next milestones
$ ipfs cat bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
hello IPFS
```
[Milestones on GitHub](https://github.com/ipfs/kubo/milestones)
Verify this CID is provided by your node to the IPFS network: <https://check.ipfs.network/?cid=bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa>
## Table of Contents
- [What is Kubo?](#what-is-kubo)
- [What is IPFS?](#what-is-ipfs)
- [Next milestones](#next-milestones)
- [Table of Contents](#table-of-contents)
- [Security Issues](#security-issues)
- [Install](#install)
- [Minimal System Requirements](#minimal-system-requirements)
- [Docker](#docker)
- [Official prebuilt binaries](#official-prebuilt-binaries)
- [Updating](#updating)
- [Downloading builds using IPFS](#downloading-builds-using-ipfs)
- [Unofficial Linux packages](#unofficial-linux-packages)
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Fedora](#fedora-copr)
- [Unofficial Windows packages](#unofficial-windows-packages)
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
- [Unofficial MacOS packages](#unofficial-macos-packages)
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
- [Build from Source](#build-from-source)
- [Install Go](#install-go)
- [Download and Compile IPFS](#download-and-compile-ipfs)
- [Cross Compiling](#cross-compiling)
- [Troubleshooting](#troubleshooting)
- [Getting Started](#getting-started)
- [Usage](#usage)
- [Some things to try](#some-things-to-try)
- [Troubleshooting](#troubleshooting-1)
- [Packages](#packages)
- [Development](#development)
- [Map of Implemented Subsystems](#map-of-implemented-subsystems)
- [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram)
- [Testing](#testing)
- [Development Dependencies](#development-dependencies)
- [Developer Notes](#developer-notes)
- [Maintainer Info](#maintainer-info)
- [Contributing](#contributing)
- [License](#license)
## Security Issues
Please follow [`SECURITY.md`](SECURITY.md).
See `ipfs add --help` for all import options. Ready for more? Follow the [command-line quick start](https://docs.ipfs.tech/how-to/command-line-quick-start/).
## Install
The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development.
Follow the [official installation guide](https://docs.ipfs.tech/install/command-line/), or choose: [prebuilt binary](#official-prebuilt-binaries) | [Docker](#docker) | [package manager](#package-managers) | [from source](#build-from-source).
For production use, Release Docker images (below) are recommended.
Prefer a GUI? Try [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and/or [IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/).
### Minimal System Requirements
@@ -127,388 +77,148 @@ Kubo runs on most Linux, macOS, and Windows systems. For optimal performance, we
> [!CAUTION]
> Systems with less than the recommended memory may experience instability, frequent OOM errors or restarts, and missing data announcement (reprovider window), which can make data fully or partially inaccessible to other peers. Running Kubo on underprovisioned hardware is at your own risk.
### Official Prebuilt Binaries
Download from https://dist.ipfs.tech#kubo or [GitHub Releases](https://github.com/ipfs/kubo/releases/latest).
### Docker
Official images are published at https://hub.docker.com/r/ipfs/kubo/: [![Docker Image Version (latest semver)](https://img.shields.io/docker/v/ipfs/kubo?color=blue&label=kubo%20docker%20image&logo=docker&sort=semver&style=flat-square&cacheSeconds=3600)](https://hub.docker.com/r/ipfs/kubo/)
#### 🟢 Release Images
- These are production grade images. Use them.
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest). If you use this, remember to `docker pull` periodically to update.
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
#### 🟠 Developer Preview Images
- These tags are used by developers for internal testing, not intended for end users or production use.
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) always points at the `HEAD` of the [`master`](https://github.com/ipfs/kubo/commits/master/) branch
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit from the `master` branch
Use these for production deployments.
#### 🔴 Internal Staging Images
- We use `staging` for testing arbitrary commits and experimental patches.
- To build image for current HEAD, force push to `staging` via `git push origin HEAD:staging --force`)
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) always points at the `HEAD` of the [`staging`](https://github.com/ipfs/kubo/commits/staging/) branch
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit from the `staging` branch
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest)
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
```console
$ docker pull ipfs/kubo:latest
$ docker run --rm -it --net=host ipfs/kubo:latest
```
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node),
pass necessary config via `-e` or by mounting scripts in the `/container-init.d`.
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), pass config via `-e` or mount scripts in `/container-init.d`.
Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/
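
For example, a minimal sketch of both approaches; the init-script name is illustrative:

```console
$ # apply a config profile on first run via environment variable
$ docker run --rm -it --net=host -e IPFS_PROFILE=server ipfs/kubo:latest
$ # or run a custom script before the daemon starts
$ docker run --rm -it --net=host -v "$PWD/001-config.sh":/container-init.d/001-config.sh ipfs/kubo:latest
```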
#### 🟠 Developer Preview Images
### Official prebuilt binaries
For internal testing, not intended for production.
The official binaries are published at https://dist.ipfs.tech#kubo:
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) points at `HEAD` of [`master`](https://github.com/ipfs/kubo/commits/master/)
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit
[![dist.ipfs.tech Downloads](https://img.shields.io/github/v/release/ipfs/kubo?label=dist.ipfs.tech&logo=ipfs&style=flat-square&cacheSeconds=3600)](https://dist.ipfs.tech#kubo)
#### 🔴 Internal Staging Images
From there:
- Click the blue "Download Kubo" on the right side of the page.
- Open/extract the archive.
- Move kubo (`ipfs`) to your path (`install.sh` can do it for you).
For testing arbitrary commits and experimental patches (force push to `staging` branch).
If you are unable to access [dist.ipfs.tech](https://dist.ipfs.tech#kubo), you can also download kubo from:
- this project's GitHub [releases](https://github.com/ipfs/kubo/releases/latest) page
- `/ipns/dist.ipfs.tech` at [dweb.link](https://dweb.link/ipns/dist.ipfs.tech#kubo) gateway
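
A minimal sketch of the download-and-extract steps listed above on Linux; version and architecture are placeholders:

```console
$ tar -xzf kubo_vX.Y.Z_linux-amd64.tar.gz
$ cd kubo
$ sudo bash install.sh   # copies the ipfs binary into your PATH
$ ipfs --version
```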
#### Updating
##### Downloading builds using IPFS
List the available versions of Kubo implementation:
```console
$ ipfs cat /ipns/dist.ipfs.tech/kubo/versions
```
Then, to view available builds for a version from the previous command (`$VERSION`):
```console
$ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION
```
To download a given build of a version:
```console
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-arm64.tar.gz # darwin arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-riscv64.tar.gz # linux riscv64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm64.tar.gz # linux arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows amd64 build
```
### Unofficial Linux packages
<a href="https://repology.org/project/kubo/versions">
<img src="https://repology.org/badge/vertical-allrepos/kubo.svg" alt="Packaging status" align="right">
</a>
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix-linux)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Fedora](#fedora-copr)
#### Arch Linux
[![kubo via Community Repo](https://img.shields.io/archlinux/v/community/x86_64/kubo?color=1793d1&label=kubo&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://wiki.archlinux.org/title/IPFS)
```bash
# pacman -S kubo
```
[![kubo-git via AUR](https://img.shields.io/static/v1?label=kubo-git&message=latest%40master&color=1793d1&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://archlinux.org/packages/kubo/)
#### <a name="gentoo-linux">Gentoo Linux</a>
https://wiki.gentoo.org/wiki/Kubo
```bash
# emerge -a net-p2p/kubo
```
https://packages.gentoo.org/packages/net-p2p/kubo
#### <a name="nix-linux">Nix</a>
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo like this:
```
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `kubo`.
#### Solus
[Package for Solus](https://dev.getsol.us/source/kubo/repository/master/)
```
$ sudo eopkg install kubo
```
You can also install it through the Solus software center.
#### openSUSE
[Community Package for kubo](https://software.opensuse.org/package/kubo)
#### Guix
[Community Package for kubo](https://packages.guix.gnu.org/search/?query=kubo) is available.
#### Snap
No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688).
#### Ubuntu PPA
[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad.
##### Latest Ubuntu (>= 20.04 LTS)
```sh
sudo add-apt-repository ppa:twdragon/ipfs
sudo apt update
sudo apt install ipfs-kubo
```
### Fedora COPR
[`taw00/ipfs-rpm`](https://github.com/taw00/ipfs-rpm)
##### Any Ubuntu version
```sh
sudo su
echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
exit
sudo apt update
sudo apt install ipfs-kubo
```
where `<<DISTRO>>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use.
**NOTE**: this method also may work with any compatible Debian-based distro which has `libc6` inside, and APT as a package manager.
### Unofficial Windows packages
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
#### Chocolatey
No longer supported, see rationale in [kubo#9341](https://github.com/ipfs/kubo/issues/9341).
#### Scoop
Scoop provides kubo as `kubo` in its 'extras' bucket.
```Powershell
PS> scoop bucket add extras
PS> scoop install kubo
```
### Unofficial macOS packages
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
#### MacPorts
The package [ipfs](https://ports.macports.org/port/ipfs) currently points to kubo and is being maintained.
```
$ sudo port install ipfs
```
#### <a name="nix-macos">Nix</a>
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
```
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `kubo`.
#### Homebrew
A Homebrew formula [ipfs](https://formulae.brew.sh/formula/ipfs) is maintained too.
```
$ brew install --formula ipfs
```
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) points at `HEAD` of [`staging`](https://github.com/ipfs/kubo/commits/staging/)
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit
### Build from Source
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600)
kubo's build system requires Go and some standard POSIX build tools:
* GNU make
* Git
* GCC (or some other go compatible C Compiler) (optional)
To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=0`).
#### Install Go
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600)
If you need to update: [Download latest version of Go](https://golang.org/dl/).
You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`:
```
export PATH=$PATH:/usr/local/go/bin
export PATH=$PATH:$GOPATH/bin
```bash
git clone https://github.com/ipfs/kubo.git
cd kubo
make build # creates cmd/ipfs/ipfs
make install # installs to $GOPATH/bin/ipfs
```
(If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)).
See the [Developer Guide](docs/developer-guide.md) for details, Windows instructions, and troubleshooting.
#### Download and Compile IPFS
### Package Managers
```
$ git clone https://github.com/ipfs/kubo.git
Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. See [Repology](https://repology.org/project/kubo/versions) for the full list: [![Packaging status](https://repology.org/badge/tiny-repos/kubo.svg)](https://repology.org/project/kubo/versions)
$ cd kubo
$ make install
```
> [!WARNING]
> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source).
Alternatively, you can run `make build` to build the kubo binary (storing it in `cmd/ipfs/ipfs`) without installing it.
#### Linux
**NOTE:** If you get an error along the lines of "fatal error: stdlib.h: No such file or directory", you're missing a C compiler. Either re-run `make` with `CGO_ENABLED=0` or install GCC.
| Distribution | Install | Version |
|--------------|---------|---------|
| Ubuntu | [PPA](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs): `sudo apt install ipfs-kubo` | [![PPA: twdragon](https://img.shields.io/badge/PPA-twdragon-E95420?logo=ubuntu)](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) |
| Arch | `pacman -S kubo` | [![Arch package](https://repology.org/badge/version-for-repo/arch/kubo.svg)](https://archlinux.org/packages/extra/x86_64/kubo/) |
| Fedora | [COPR](https://copr.fedorainfracloud.org/coprs/taw/ipfs/): `dnf install kubo` | [![COPR: taw](https://img.shields.io/badge/COPR-taw-51A2DA?logo=fedora)](https://copr.fedorainfracloud.org/coprs/taw/ipfs/) |
| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) |
| Gentoo | `emerge -a net-p2p/kubo` | [![Gentoo package](https://repology.org/badge/version-for-repo/gentoo/kubo.svg)](https://packages.gentoo.org/packages/net-p2p/kubo) |
| openSUSE | `zypper install kubo` | [![openSUSE Tumbleweed](https://repology.org/badge/version-for-repo/opensuse_tumbleweed/kubo.svg)](https://software.opensuse.org/package/kubo) |
| Solus | `sudo eopkg install kubo` | [![Solus package](https://repology.org/badge/version-for-repo/solus/kubo.svg)](https://packages.getsol.us/shannon/k/kubo/) |
| Guix | `guix install kubo` | [![Guix package](https://repology.org/badge/version-for-repo/gnuguix/kubo.svg)](https://packages.guix.gnu.org/packages/kubo/) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
##### Cross Compiling
~~Snap~~ no longer supported ([#8688](https://github.com/ipfs/kubo/issues/8688))
Compiling for a different platform is as simple as running:
#### macOS
```
make build GOOS=myTargetOS GOARCH=myTargetArchitecture
```
| Manager | Install | Version |
|---------|---------|---------|
| Homebrew | `brew install ipfs` | [![Homebrew](https://repology.org/badge/version-for-repo/homebrew/kubo.svg)](https://formulae.brew.sh/formula/ipfs) |
| MacPorts | `sudo port install ipfs` | [![MacPorts](https://repology.org/badge/version-for-repo/macports/kubo.svg)](https://ports.macports.org/port/ipfs/) |
| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
#### Troubleshooting
#### Windows
- Separate [instructions are available for building on Windows](docs/windows.md).
- `git` is required in order for `go get` to fetch all dependencies.
- Package managers often contain out-of-date `golang` packages.
Ensure that `go version` reports the minimum version required (see go.mod). See above for how to install go.
- If you are interested in development, please install the development
dependencies as well.
- Shell command completions can be generated with one of the `ipfs commands completion` subcommands. Read [docs/command-completion.md](docs/command-completion.md) to learn more.
- See the [misc folder](https://github.com/ipfs/kubo/tree/master/misc) for how to connect IPFS to systemd or whatever init system your distro uses.
| Manager | Install | Version |
|---------|---------|---------|
| Scoop | `scoop install kubo` | [![Scoop](https://repology.org/badge/version-for-repo/scoop/kubo.svg)](https://scoop.sh/#/apps?q=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
## Getting Started
~~Chocolatey~~ no longer supported ([#9341](https://github.com/ipfs/kubo/issues/9341))
### Usage
## Documentation
[![docs: Command-line quick start](https://img.shields.io/static/v1?label=docs&message=Command-line%20quick%20start&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/how-to/command-line-quick-start/)
[![docs: Command-line reference](https://img.shields.io/static/v1?label=docs&message=Command-line%20reference&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/reference/kubo/cli/)
To start using IPFS, you must first initialize IPFS's config files on your
system, this is done with `ipfs init`. See `ipfs init --help` for information on
the optional arguments it takes. After initialization is complete, you can use
`ipfs mount`, `ipfs add` and any of the other commands to explore!
For detailed configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
### Some things to try
Basic proof of 'ipfs working' locally:
echo "hello world" > hello
ipfs add hello
# This should output a hash string that looks something like:
# QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o
ipfs cat <that hash>
### HTTP/RPC clients
For programmatic interaction with Kubo, see our [list of HTTP/RPC clients](docs/http-rpc-clients.md).
### Troubleshooting
If you have previously installed IPFS before and you are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (~/.ipfs by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries.
For more information about configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
Please direct general questions and help requests to our [forums](https://discuss.ipfs.tech).
If you believe you've found a bug, check the [issues list](https://github.com/ipfs/kubo/issues) and, if you don't see your problem there, either come talk to us on [Matrix chat](https://docs.ipfs.tech/community/chat/), or file an issue of your own!
## Packages
See [IPFS in GO](https://docs.ipfs.tech/reference/go/api/) documentation.
| Topic | Description |
|-------|-------------|
| [Configuration](docs/config.md) | All config options reference |
| [Environment variables](docs/environment-variables.md) | Runtime settings via env vars |
| [Experimental features](docs/experimental-features.md) | Opt-in features in development |
| [HTTP Gateway](docs/gateway.md) | Path, subdomain, and trustless gateway setup |
| [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS |
| [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing |
| [Metrics & monitoring](docs/metrics.md) | Prometheus metrics |
| [Content blocking](docs/content-blocking.md) | Denylist for public nodes |
| [Customizing](docs/customizing.md) | Unsure whether to use plugins, Boxo, or a fork? |
| [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing |
| [Changelogs](docs/changelogs/) | Release notes for each version |
| [All documentation](https://github.com/ipfs/kubo/tree/master/docs) | Full list of docs |
## Development
Some places to get you started on the codebase:
See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow.
- Main file: [./cmd/ipfs/main.go](https://github.com/ipfs/kubo/blob/master/cmd/ipfs/main.go)
- CLI Commands: [./core/commands/](https://github.com/ipfs/kubo/tree/master/core/commands)
- Bitswap (the data trading engine): [go-bitswap](https://github.com/ipfs/go-bitswap)
- libp2p
- libp2p: https://github.com/libp2p/go-libp2p
- DHT: https://github.com/libp2p/go-libp2p-kad-dht
- [IPFS : The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md)
## Getting Help
### Map of Implemented Subsystems
**WIP**: This is a high-level architecture diagram of the various sub-systems of this specific implementation. To be updated with how they interact. Anyone who has suggestions is welcome to comment [here](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit) on how we can improve this!
<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&amp;h=1036">
- [IPFS Forum](https://discuss.ipfs.tech) - community support, questions, and discussion
- [Community](https://docs.ipfs.tech/community/) - chat, events, and working groups
- [GitHub Issues](https://github.com/ipfs/kubo/issues) - bug reports for Kubo specifically
- [IPFS Docs Issues](https://github.com/ipfs/ipfs-docs/issues) - documentation issues
### CLI, HTTP-API, Architecture Diagram
![](./docs/cli-http-api-core-diagram.png)
> [Origin](https://github.com/ipfs/pm/pull/678#discussion_r210410924)
Description: Dotted means "likely going away". The "Legacy" parts are thin wrappers around some commands to translate between the new system and the old system. The grayed-out parts on the "daemon" diagram are there to show that the code is all the same, it's just that we turn some pieces on and some pieces off depending on whether we're running on the client or the server.
### Testing
```
make test
```
### Development Dependencies
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
### Developer Notes
Find more documentation for developers on [docs](./docs)
## Maintainer Info
Kubo is maintained by [Shipyard](https://ipshipyard.com/).
* This repository is part of [Shipyard's GO Triage triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a).
* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
## Security Issues
See [`SECURITY.md`](SECURITY.md).
## Contributing
[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)
We ❤️ all [our contributors](docs/AUTHORS); this project wouldnt be what it is without you! If you want to help out, please see [CONTRIBUTING.md](CONTRIBUTING.md).
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md).
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
This repository follows the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
Members of IPFS community provide Kubo support on [discussion forum category here](https://discuss.ipfs.tech/c/help/help-kubo/23).
## Maintainer Info
Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help.
<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>
> [!NOTE]
> Kubo is maintained by the [Shipyard](https://ipshipyard.com/) team.
>
> [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
## License
This project is dual-licensed under Apache 2.0 and MIT terms:
Dual-licensed under Apache 2.0 and MIT:
- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/ipfs/kubo/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](https://github.com/ipfs/kubo/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)
- [LICENSE-APACHE](LICENSE-APACHE)
- [LICENSE-MIT](LICENSE-MIT)


@@ -134,15 +134,14 @@ help:
@echo ''
@echo 'TESTING TARGETS:'
@echo ''
@echo ' test - Run all tests'
@echo ' test_short - Run short go tests and short sharness tests'
@echo ' test_go_short - Run short go tests'
@echo ' test_go_test - Run all go tests'
@echo ' test - Run all tests (test_go_fmt, test_unit, test_cli, test_sharness)'
@echo ' test_short - Run fast tests (test_go_fmt, test_unit)'
@echo ' test_unit - Run unit tests with coverage (excludes test/cli)'
@echo ' test_cli - Run CLI integration tests (requires built binary)'
@echo ' test_go_fmt - Check Go source formatting'
@echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
@echo ' test_go_expensive - Run all go tests and build all platforms'
@echo ' test_go_race - Run go tests with the race detector enabled'
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
@echo ' test_go_lint - Run golangci-lint'
@echo ' test_sharness - Run sharness tests'
@echo ' coverage - Collects coverage info from unit tests and sharness'
@echo ' coverage - Collect coverage info from unit tests and sharness'
@echo
.PHONY: help
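
For instance, a typical local loop using the targets listed above (assuming a checkout of this repository):

```console
$ make test_short              # fast path: gofmt check + unit tests
$ make build && make test_cli  # CLI integration tests need the built binary first
```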


@@ -101,7 +101,7 @@ func (api *KeyAPI) List(ctx context.Context) ([]iface.Key, error) {
var out struct {
Keys []keyOutput
}
if err := api.core().Request("key/list").Exec(ctx, &out); err != nil {
if err := api.core().Request("key/ls").Exec(ctx, &out); err != nil {
return nil, err
}
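
Assuming a local daemon on the default RPC address, the corrected endpoint can be exercised directly (output omitted):

```console
$ curl -X POST http://127.0.0.1:5001/api/v0/key/ls
```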


@@ -181,8 +181,8 @@ Headers.
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").WithDefault(true),
cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."),
cmds.BoolOption(enablePubSubKwd, "DEPRECATED"),
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS over pubsub. Implicitly enables pubsub, overrides Ipns.UsePubsub config."),
cmds.BoolOption(enablePubSubKwd, "DEPRECATED CLI flag. Use Pubsub.Enabled config instead."),
cmds.BoolOption(enableIPNSPubSubKwd, "DEPRECATED CLI flag. Use Ipns.UsePubsub config instead."),
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and exposed via libp2p identify protocol."),
@@ -397,10 +397,14 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID)
if !psSet {
if psSet {
log.Error("The --enable-pubsub-experiment flag is deprecated. Use Pubsub.Enabled config option instead.")
} else {
pubsub = cfg.Pubsub.Enabled.WithDefault(false)
}
if !ipnsPsSet {
if ipnsPsSet {
log.Error("The --enable-namesys-pubsub flag is deprecated. Use Ipns.UsePubsub config option instead.")
} else {
ipnsps = cfg.Ipns.UsePubsub.WithDefault(false)
}
@ -883,23 +887,38 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
}
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
errc := make(chan error, len(listeners))
var wg sync.WaitGroup
// Start all servers and wait for them to be ready before writing api file.
// This prevents race conditions where external tools (like systemd path units)
// see the file and try to connect before servers can accept connections.
if len(listeners) > 0 {
// Only add an api file if the API is running.
readyChannels := make([]chan struct{}, len(listeners))
for i, lis := range listeners {
readyChannels[i] = make(chan struct{})
ready := readyChannels[i]
wg.Go(func() {
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
})
}
// Wait for all listeners to be ready or any to fail
for _, ready := range readyChannels {
select {
case <-ready:
// This listener is ready
case err := <-errc:
return nil, fmt.Errorf("serveHTTPApi: %w", err)
}
}
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
}
}
errc := make(chan error)
var wg sync.WaitGroup
for _, apiLis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(apiLis)
}
go func() {
wg.Wait()
close(errc)
@ -1058,26 +1077,42 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err)
}
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
errc := make(chan error, len(listeners))
var wg sync.WaitGroup
// Start all servers and wait for them to be ready before writing gateway file.
// This prevents race conditions where external tools (like systemd path units)
// see the file and try to connect before servers can accept connections.
if len(listeners) > 0 {
readyChannels := make([]chan struct{}, len(listeners))
for i, lis := range listeners {
readyChannels[i] = make(chan struct{})
ready := readyChannels[i]
wg.Go(func() {
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
})
}
// Wait for all listeners to be ready or any to fail
for _, ready := range readyChannels {
select {
case <-ready:
// This listener is ready
case err := <-errc:
return nil, fmt.Errorf("serveHTTPGateway: %w", err)
}
}
addr, err := manet.ToNetAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr()))
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: manet.ToIP() failed: %w", err)
return nil, fmt.Errorf("serveHTTPGateway: manet.ToNetAddr() failed: %w", err)
}
if err := node.Repo.SetGatewayAddr(addr); err != nil {
return nil, fmt.Errorf("serveHTTPGateway: SetGatewayAddr() failed: %w", err)
}
}
errc := make(chan error)
var wg sync.WaitGroup
for _, lis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(lis)
}
go func() {
wg.Wait()
close(errc)
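corehttp.ServeWithReady itself is not part of this diff. A minimal sketch of the readiness contract the daemon code above assumes (close the ready channel once the bound listener is serving, then block) might look like the following; the name serveWithReady and its signature are illustrative assumptions, not the actual corehttp API.
// Sketch only: not the real corehttp.ServeWithReady implementation.
package corehttpsketch
import (
	"net"
	"net/http"
)
func serveWithReady(h http.Handler, lis net.Listener, ready chan<- struct{}) error {
	srv := &http.Server{Handler: h}
	// The listener is already bound and accepting connections,
	// so readiness can be signalled before blocking in Serve.
	close(ready)
	return srv.Serve(lis)
}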

3
cmd/ipfswatch/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
ipfswatch
ipfswatch-test-cover
ipfswatch.exe

View File

@ -9,6 +9,7 @@ import (
"os"
"os/signal"
"path/filepath"
"slices"
"syscall"
commands "github.com/ipfs/kubo/commands"
@ -17,6 +18,11 @@ import (
coreapi "github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
"github.com/ipfs/kubo/misc/fsutil"
"github.com/ipfs/kubo/plugin"
pluginbadgerds "github.com/ipfs/kubo/plugin/plugins/badgerds"
pluginflatfs "github.com/ipfs/kubo/plugin/plugins/flatfs"
pluginlevelds "github.com/ipfs/kubo/plugin/plugins/levelds"
pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
fsnotify "github.com/fsnotify/fsnotify"
@ -60,6 +66,18 @@ func main() {
}
}
func loadDatastorePlugins(plugins []plugin.Plugin) error {
for _, pl := range plugins {
if pl, ok := pl.(plugin.PluginDatastore); ok {
err := fsrepo.AddDatastoreConfigHandler(pl.DatastoreTypeName(), pl.DatastoreConfigParser())
if err != nil {
return err
}
}
}
return nil
}
func run(ipfsPath, watchPath string) error {
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
@ -77,6 +95,15 @@ func run(ipfsPath, watchPath string) error {
return err
}
if err = loadDatastorePlugins(slices.Concat(
pluginbadgerds.Plugins,
pluginflatfs.Plugins,
pluginlevelds.Plugins,
pluginpebbleds.Plugins,
)); err != nil {
return err
}
r, err := fsrepo.Open(ipfsPath)
if err != nil {
// TODO handle case: daemon running
@ -123,6 +150,7 @@ func run(ipfsPath, watchPath string) error {
log.Printf("received event: %s", e)
isDir, err := IsDirectory(e.Name)
if err != nil {
log.Println(err)
continue
}
switch e.Op {
@ -193,7 +221,7 @@ func addTree(w *fsnotify.Watcher, root string) error {
return filepath.SkipDir
case isDir:
log.Println(path)
if err := w.Add(path); err != nil {
if err = w.Add(path); err != nil {
return err
}
default:
@ -206,7 +234,10 @@ func addTree(w *fsnotify.Watcher, root string) error {
func IsDirectory(path string) (bool, error) {
fileInfo, err := os.Stat(path)
return fileInfo.IsDir(), err
if err != nil {
return false, err
}
return fileInfo.IsDir(), nil
}
func IsHidden(path string) bool {

View File

@ -16,6 +16,13 @@ type AutoTLS struct {
// Optional, controls if Kubo should add /tls/sni/.../ws listener to every /tcp port if no explicit /ws is defined in Addresses.Swarm
AutoWSS Flag `json:",omitempty"`
// Optional, controls whether to skip network DNS lookups for p2p-forge domains.
// Applies to resolution via DNS.Resolvers, including /dns* multiaddrs in go-libp2p.
// When enabled (default), A/AAAA queries for *.libp2p.direct are resolved
// locally by parsing the IP directly from the hostname, avoiding network I/O.
// Set to false to always use network DNS (useful for debugging).
SkipDNSLookup Flag `json:",omitempty"`
// Optional override of the parent domain that will be used
DomainSuffix *OptionalString `json:",omitempty"`
@ -42,5 +49,6 @@ const (
DefaultCAEndpoint = p2pforge.DefaultCAEndpoint
DefaultAutoWSS = true // requires AutoTLS.Enabled
DefaultAutoTLSShortAddrs = true // requires AutoTLS.Enabled
DefaultAutoTLSSkipDNSLookup = true // skip network DNS for p2p-forge domains
DefaultAutoTLSRegistrationDelay = 1 * time.Hour
)

View File

@ -14,6 +14,7 @@ const (
// Gateway limit defaults from boxo
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
DefaultMaxRequestDuration = gateway.DefaultMaxRequestDuration
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
DefaultMaxRangeRequestFileSize = 0 // 0 means no limit
)
@ -76,7 +77,7 @@ type Gateway struct {
// AllowCodecConversion enables automatic conversion between codecs when
// the requested format differs from the block's native codec (e.g.,
// converting dag-pb or dag-cbor to dag-json). When disabled, the gateway
// returns 406 Not Acceptable for codec mismatches per IPIP-0524.
// returns 406 Not Acceptable for codec mismatches per IPIP-524.
AllowCodecConversion Flag
// DisableHTMLErrors disables pretty HTML pages when an error occurs. Instead, a `text/plain`
@ -103,6 +104,14 @@ type Gateway struct {
// A value of 0 disables this timeout.
RetrievalTimeout *OptionalDuration `json:",omitempty"`
// MaxRequestDuration is an absolute deadline for the entire request.
// Unlike RetrievalTimeout (which resets on each data write and catches
// stalled transfers), this is a hard limit on the total time a request
// can take. Returns 504 Gateway Timeout when exceeded.
// This protects the gateway from edge cases and slow client attacks.
// A value of 0 uses the default (1 hour).
MaxRequestDuration *OptionalDuration `json:",omitempty"`
// MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway.
// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
// A value of 0 disables the limit.
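As a rough illustration of the MaxConcurrentRequests semantics described above (not the boxo/gateway implementation), a semaphore-based middleware that rejects excess requests with 429 and a Retry-After hint could look like this:
// Sketch only: requests over the limit get 429 Too Many Requests.
package gatewaysketch
import "net/http"
func limitConcurrency(next http.Handler, limit int) http.Handler {
	sem := make(chan struct{}, limit)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			next.ServeHTTP(w, r)
		default:
			w.Header().Set("Retry-After", "30") // illustrative value, in seconds
			http.Error(w, "too many concurrent requests", http.StatusTooManyRequests)
		}
	})
}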

View File

@ -2,11 +2,13 @@ package config
import (
"fmt"
"io"
"strconv"
"strings"
chunk "github.com/ipfs/boxo/chunker"
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/boxo/ipld/unixfs/io"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/verifcid"
mh "github.com/multiformats/go-multihash"
)
@ -29,29 +31,44 @@ const (
// write-batch. The total size of the batch is limited by
// BatchMaxNodes and BatchMaxSize.
DefaultBatchMaxSize = 100 << 20 // 100MiB
// HAMTSizeEstimation values for Import.UnixFSHAMTDirectorySizeEstimation
HAMTSizeEstimationLinks = "links" // legacy: estimate using link names + CID byte lengths (default)
HAMTSizeEstimationBlock = "block" // full serialized dag-pb block size
HAMTSizeEstimationDisabled = "disabled" // disable HAMT sharding entirely
// DAGLayout values for Import.UnixFSDAGLayout
DAGLayoutBalanced = "balanced" // balanced DAG layout (default)
DAGLayoutTrickle = "trickle" // trickle DAG layout
DefaultUnixFSHAMTDirectorySizeEstimation = HAMTSizeEstimationLinks // legacy behavior
DefaultUnixFSDAGLayout = DAGLayoutBalanced // balanced DAG layout
DefaultUnixFSIncludeEmptyDirs = true // include empty directories
)
var (
DefaultUnixFSFileMaxLinks = int64(helpers.DefaultLinksPerBlock)
DefaultUnixFSDirectoryMaxLinks = int64(0)
DefaultUnixFSHAMTDirectoryMaxFanout = int64(io.DefaultShardWidth)
DefaultUnixFSHAMTDirectoryMaxFanout = int64(uio.DefaultShardWidth)
)
// Import configures the default options for ingesting data. This affects commands
// that ingest data, such as 'ipfs add', 'ipfs dag put', 'ipfs block put', 'ipfs files write'.
type Import struct {
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalBytes
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalBytes
UnixFSHAMTDirectorySizeEstimation OptionalString // "links", "block", or "disabled"
UnixFSDAGLayout OptionalString // "balanced" or "trickle"
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
}
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
@ -129,6 +146,30 @@ func ValidateImportConfig(cfg *Import) error {
}
}
// Validate UnixFSHAMTDirectorySizeEstimation
if !cfg.UnixFSHAMTDirectorySizeEstimation.IsDefault() {
est := cfg.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation)
switch est {
case HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled:
// valid
default:
return fmt.Errorf("Import.UnixFSHAMTDirectorySizeEstimation must be %q, %q, or %q, got %q",
HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled, est)
}
}
// Validate UnixFSDAGLayout
if !cfg.UnixFSDAGLayout.IsDefault() {
layout := cfg.UnixFSDAGLayout.WithDefault(DefaultUnixFSDAGLayout)
switch layout {
case DAGLayoutBalanced, DAGLayoutTrickle:
// valid
default:
return fmt.Errorf("Import.UnixFSDAGLayout must be %q or %q, got %q",
DAGLayoutBalanced, DAGLayoutTrickle, layout)
}
}
return nil
}
@ -144,8 +185,7 @@ func isValidChunker(chunker string) bool {
}
// Check for size-<bytes> format
if strings.HasPrefix(chunker, "size-") {
sizeStr := strings.TrimPrefix(chunker, "size-")
if sizeStr, ok := strings.CutPrefix(chunker, "size-"); ok {
if sizeStr == "" {
return false
}
@ -167,7 +207,7 @@ func isValidChunker(chunker string) bool {
// Parse and validate min, avg, max values
values := make([]int, 3)
for i := 0; i < 3; i++ {
for i := range 3 {
val, err := strconv.Atoi(parts[i+1])
if err != nil {
return false
@ -182,3 +222,41 @@ func isValidChunker(chunker string) bool {
return false
}
// HAMTSizeEstimationMode returns the boxo SizeEstimationMode based on the config value.
func (i *Import) HAMTSizeEstimationMode() uio.SizeEstimationMode {
switch i.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation) {
case HAMTSizeEstimationLinks:
return uio.SizeEstimationLinks
case HAMTSizeEstimationBlock:
return uio.SizeEstimationBlock
case HAMTSizeEstimationDisabled:
return uio.SizeEstimationDisabled
default:
return uio.SizeEstimationLinks
}
}
// UnixFSSplitterFunc returns a SplitterGen function based on Import.UnixFSChunker.
// The returned function creates a Splitter for the configured chunking strategy.
// The chunker string is parsed once when this method is called, not on each use.
func (i *Import) UnixFSSplitterFunc() chunk.SplitterGen {
chunkerStr := i.UnixFSChunker.WithDefault(DefaultUnixFSChunker)
// Parse size-based chunker (most common case) and return optimized generator
if sizeStr, ok := strings.CutPrefix(chunkerStr, "size-"); ok {
if size, err := strconv.ParseInt(sizeStr, 10, 64); err == nil && size > 0 {
return chunk.SizeSplitterGen(size)
}
}
// For other chunker types (rabin, buzhash) or invalid config,
// fall back to parsing per-use (these are rare cases)
return func(r io.Reader) chunk.Splitter {
s, err := chunk.FromString(r, chunkerStr)
if err != nil {
return chunk.DefaultSplitter(r)
}
return s
}
}
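For reference, a hypothetical caller of the generator returned above (the helper name chunkAll is an illustration, not part of this change):
// Sketch only: chunks a byte slice with the configured Import.UnixFSChunker strategy.
package importsketch
import (
	"bytes"
	"io"
	"github.com/ipfs/kubo/config"
)
func chunkAll(imp *config.Import, data []byte) ([][]byte, error) {
	spl := imp.UnixFSSplitterFunc()(bytes.NewReader(data))
	var chunks [][]byte
	for {
		b, err := spl.NextBytes()
		if err == io.EOF {
			return chunks, nil
		}
		if err != nil {
			return nil, err
		}
		chunks = append(chunks, b)
	}
}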

View File

@ -4,6 +4,7 @@ import (
"strings"
"testing"
"github.com/ipfs/boxo/ipld/unixfs/io"
mh "github.com/multiformats/go-multihash"
)
@ -406,3 +407,104 @@ func TestIsPowerOfTwo(t *testing.T) {
})
}
}
func TestValidateImportConfig_HAMTSizeEstimation(t *testing.T) {
tests := []struct {
name string
value string
wantErr bool
errMsg string
}{
{name: "valid links", value: HAMTSizeEstimationLinks, wantErr: false},
{name: "valid block", value: HAMTSizeEstimationBlock, wantErr: false},
{name: "valid disabled", value: HAMTSizeEstimationDisabled, wantErr: false},
{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
{name: "invalid typo", value: "link", wantErr: true, errMsg: "must be"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSHAMTDirectorySizeEstimation: *NewOptionalString(tt.value),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("expected error for value=%q, got nil", tt.value)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("unexpected error for value=%q: %v", tt.value, err)
}
}
})
}
}
func TestValidateImportConfig_DAGLayout(t *testing.T) {
tests := []struct {
name string
value string
wantErr bool
errMsg string
}{
{name: "valid balanced", value: DAGLayoutBalanced, wantErr: false},
{name: "valid trickle", value: DAGLayoutTrickle, wantErr: false},
{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
{name: "invalid flat", value: "flat", wantErr: true, errMsg: "must be"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSDAGLayout: *NewOptionalString(tt.value),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("expected error for value=%q, got nil", tt.value)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("unexpected error for value=%q: %v", tt.value, err)
}
}
})
}
}
func TestImport_HAMTSizeEstimationMode(t *testing.T) {
tests := []struct {
cfg string
want io.SizeEstimationMode
}{
{HAMTSizeEstimationLinks, io.SizeEstimationLinks},
{HAMTSizeEstimationBlock, io.SizeEstimationBlock},
{HAMTSizeEstimationDisabled, io.SizeEstimationDisabled},
{"", io.SizeEstimationLinks}, // default (unset returns default)
{"unknown", io.SizeEstimationLinks}, // fallback to default
}
for _, tt := range tests {
t.Run(tt.cfg, func(t *testing.T) {
var imp Import
if tt.cfg != "" {
imp.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(tt.cfg)
}
got := imp.HAMTSizeEstimationMode()
if got != tt.want {
t.Errorf("Import.HAMTSizeEstimationMode() with %q = %v, want %v", tt.cfg, got, tt.want)
}
})
}
}

View File

@ -312,45 +312,33 @@ fetching may be degraded.
return nil
},
},
"unixfs-v0-2015": {
Description: `Legacy UnixFS import profile for backward-compatible CID generation.
Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and
link-based HAMT size estimation. Use only when legacy CIDs are required.
See https://github.com/ipfs/specs/pull/499. Alias: legacy-cid-v0`,
Transform: applyUnixFSv02015,
},
"legacy-cid-v0": {
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. This is likely the least optimal preset, use only if legacy behavior is required.`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(0)
c.Import.UnixFSRawLeaves = False
c.Import.UnixFSChunker = *NewOptionalString("size-262144")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
Description: `Alias for unixfs-v0-2015 profile.`,
Transform: applyUnixFSv02015,
},
"test-cid-v1": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`,
"unixfs-v1-2025": {
Description: `Recommended UnixFS import profile for cross-implementation CID determinism.
Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node,
256 HAMT fanout, and block-based size estimation for HAMT threshold.
See https://github.com/ipfs/specs/pull/499`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
},
"test-cid-v1-wide": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per every node type, switch dir to HAMT above 1MiB).`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1 MiB
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("1MiB") // 1MiB
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationBlock)
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
return nil
},
},
@ -435,3 +423,18 @@ func mapKeys(m map[string]struct{}) []string {
}
return out
}
// applyUnixFSv02015 applies the legacy UnixFS v0 (2015) import settings.
func applyUnixFSv02015(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(0)
c.Import.UnixFSRawLeaves = False
c.Import.UnixFSChunker = *NewOptionalString("size-262144") // 256 KiB
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationLinks)
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
return nil
}

View File

@ -15,6 +15,7 @@ import (
"github.com/cheggaaa/pb"
"github.com/ipfs/boxo/files"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
"github.com/ipfs/boxo/verifcid"
@ -68,6 +69,7 @@ const (
mtimeNsecsOptionName = "mtime-nsecs"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
emptyDirsOptionName = "empty-dirs"
)
const (
@ -147,6 +149,18 @@ to find it in the future:
See 'ipfs files --help' to learn more about using MFS
for keeping track of added files and directories.
SYMLINK HANDLING:
By default, symbolic links are preserved as UnixFS symlink nodes that store
the target path. Use --dereference-symlinks to resolve symlinks to their
target content instead:
> ipfs add -r --dereference-symlinks ./mydir
This resolves all symlinks, including CLI arguments and those found inside
directories. Symlinks to files become regular file content; symlinks to
directories are traversed and their contents are added.
CHUNKING EXAMPLES:
The chunker option, '-s', specifies the chunking strategy that dictates
@ -200,11 +214,13 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
Options: []cmds.Option{
// Input Processing
cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args)
cmds.OptionDerefArgs, // DEPRECATED: use --dereference-symlinks instead
cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file
cmds.OptionHidden,
cmds.OptionIgnore,
cmds.OptionIgnoreRules,
cmds.BoolOption(emptyDirsOptionName, "E", "Include empty directories in the import.").WithDefault(config.DefaultUnixFSIncludeEmptyDirs),
cmds.OptionDerefSymlinks, // resolve symlinks to their target content
// Output Control
cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
@ -274,7 +290,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
}
progress, _ := req.Options[progressOptionName].(bool)
trickle, _ := req.Options[trickleOptionName].(bool)
trickle, trickleSet := req.Options[trickleOptionName].(bool)
wrap, _ := req.Options[wrapOptionName].(bool)
onlyHash, _ := req.Options[onlyHashOptionName].(bool)
silent, _ := req.Options[silentOptionName].(bool)
@ -285,6 +301,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int)
var sizeEstimationMode uio.SizeEstimationMode
nocopy, _ := req.Options[noCopyOptionName].(bool)
fscache, _ := req.Options[fstoreCacheOptionName].(bool)
cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)
@ -312,6 +329,17 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint)
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
emptyDirs, _ := req.Options[emptyDirsOptionName].(bool)
// Note: --dereference-args is deprecated but still works for backwards compatibility.
// The help text marks it as DEPRECATED. Users should use --dereference-symlinks instead,
// which is a superset (resolves both CLI arg symlinks AND nested symlinks in directories).
// Wire --trickle from config
if !trickleSet && !cfg.Import.UnixFSDAGLayout.IsDefault() {
layout := cfg.Import.UnixFSDAGLayout.WithDefault(config.DefaultUnixFSDAGLayout)
trickle = layout == config.DAGLayoutTrickle
}
if chunker == "" {
chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)
@ -348,6 +376,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
}
// SizeEstimationMode is always set from config (no CLI flag)
sizeEstimationMode = cfg.Import.HAMTSizeEstimationMode()
fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)
@ -409,6 +440,8 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
options.Unixfs.PreserveMode(preserveMode),
options.Unixfs.PreserveMtime(preserveMtime),
options.Unixfs.IncludeEmptyDirs(emptyDirs),
}
if mode != 0 {
@ -441,6 +474,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout))
}
// SizeEstimationMode is always set from config
opts = append(opts, options.Unixfs.SizeEstimationMode(sizeEstimationMode))
if trickle {
opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
}

View File

@ -98,6 +98,9 @@ var blockGetCmd = &cmds.Command{
'ipfs block get' is a plumbing command for retrieving raw IPFS blocks.
It takes a <cid>, and outputs the block to stdout.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipld.raw",
},
},
Arguments: []cmds.Argument{
@ -119,6 +122,8 @@ It takes a <cid>, and outputs the block to stdout.
return err
}
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipld.raw")
return res.Emit(r)
},
}

View File

@ -2,12 +2,14 @@ package cmdutils
import (
"fmt"
"slices"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/libp2p/go-libp2p/core/peer"
)
const (
@ -84,3 +86,13 @@ func PathOrCidPath(str string) (path.Path, error) {
// Send back original err.
return nil, originalErr
}
// CloneAddrInfo returns a copy of the AddrInfo with a cloned Addrs slice.
// This prevents data races if the sender reuses the backing array.
// See: https://github.com/ipfs/kubo/issues/11116
func CloneAddrInfo(ai peer.AddrInfo) peer.AddrInfo {
return peer.AddrInfo{
ID: ai.ID,
Addrs: slices.Clone(ai.Addrs),
}
}
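A hypothetical caller, shown only to illustrate why the clone matters when values are emitted asynchronously (emitAddrs and the found channel are assumptions, not part of this change):
// Sketch only: clone before emitting so a sender reusing the Addrs
// backing array cannot race with the response encoder.
package cmdutilsketch
import (
	cmds "github.com/ipfs/go-ipfs-cmds"
	"github.com/ipfs/kubo/core/commands/cmdutils"
	"github.com/libp2p/go-libp2p/core/peer"
)
func emitAddrs(res cmds.ResponseEmitter, found <-chan peer.AddrInfo) error {
	for ai := range found {
		safe := cmdutils.CloneAddrInfo(ai)
		if err := res.Emit(&safe); err != nil {
			return err
		}
	}
	return nil
}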

View File

@ -76,6 +76,9 @@ func TestCommands(t *testing.T) {
"/diag/cmds",
"/diag/cmds/clear",
"/diag/cmds/set-time",
"/diag/datastore",
"/diag/datastore/count",
"/diag/datastore/get",
"/diag/profile",
"/diag/sys",
"/files",
@ -90,6 +93,7 @@ func TestCommands(t *testing.T) {
"/files/stat",
"/files/write",
"/files/chmod",
"/files/chroot",
"/files/touch",
"/filestore",
"/filestore/dups",
@ -102,6 +106,7 @@ func TestCommands(t *testing.T) {
"/key/gen",
"/key/import",
"/key/list",
"/key/ls",
"/key/rename",
"/key/rm",
"/key/rotate",
@ -119,12 +124,14 @@ func TestCommands(t *testing.T) {
"/multibase/transcode",
"/multibase/list",
"/name",
"/name/get",
"/name/inspect",
"/name/publish",
"/name/pubsub",
"/name/pubsub/cancel",
"/name/pubsub/state",
"/name/pubsub/subs",
"/name/put",
"/name/resolve",
"/object",
"/object/data",
@ -169,6 +176,7 @@ func TestCommands(t *testing.T) {
"/pubsub/ls",
"/pubsub/peers",
"/pubsub/pub",
"/pubsub/reset",
"/pubsub/sub",
"/refs",
"/refs/local",

View File

@ -7,6 +7,7 @@ import (
"io"
"path"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
@ -275,6 +276,9 @@ Note that at present only single root selections / .car files are supported.
The output of blocks happens in strict DAG-traversal, first-seen, order.
CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipld.car",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("root", true, false, "CID of a root to recursively export").EnableStdin(),
@ -349,7 +353,11 @@ type DagStatSummary struct {
}
func (s *DagStatSummary) String() string {
return fmt.Sprintf("Total Size: %d\nUnique Blocks: %d\nShared Size: %d\nRatio: %f", s.TotalSize, s.UniqueBlocks, s.SharedSize, s.Ratio)
return fmt.Sprintf("Total Size: %d (%s)\nUnique Blocks: %d\nShared Size: %d (%s)\nRatio: %f",
s.TotalSize, humanize.Bytes(s.TotalSize),
s.UniqueBlocks,
s.SharedSize, humanize.Bytes(s.SharedSize),
s.Ratio)
}
func (s *DagStatSummary) incrementTotalSize(size uint64) {
@ -384,7 +392,7 @@ Note: This command skips duplicate blocks in reporting both size and the number
cmds.StringArg("root", true, true, "CID of a DAG root to get statistics for").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(progressOptionName, "p", "Return progressive data while reading through the DAG").WithDefault(true),
cmds.BoolOption(progressOptionName, "p", "Show progress on stderr. Auto-detected if stderr is a terminal."),
},
Run: dagStat,
Type: DagStatSummary{},

View File

@ -73,6 +73,8 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
}()
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipld.car")
if err := res.Emit(pipeR); err != nil {
pipeR.Close() // ignore the error if any
return err

View File

@ -5,6 +5,7 @@ import (
"io"
"os"
"github.com/dustin/go-humanize"
mdag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/merkledag/traverse"
cid "github.com/ipfs/go-cid"
@ -19,7 +20,11 @@ import (
// to compute the new state
func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
progressive := req.Options[progressOptionName].(bool)
// Default to true (emit intermediate states) for HTTP/RPC clients that want progress
progressive := true
if val, specified := req.Options[progressOptionName].(bool); specified {
progressive = val
}
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
@ -84,6 +89,18 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment)
}
func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
// Determine whether to show progress based on TTY detection or explicit flag
var showProgress bool
val, specified := res.Request().Options[progressOptionName]
if !specified {
// Auto-detect: show progress only if stderr is a TTY
if errStat, err := os.Stderr.Stat(); err == nil {
showProgress = (errStat.Mode() & os.ModeCharDevice) != 0
}
} else {
showProgress = val.(bool)
}
var dagStats *DagStatSummary
for {
v, err := res.Next()
@ -96,17 +113,26 @@ func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
switch out := v.(type) {
case *DagStatSummary:
dagStats = out
if dagStats.Ratio == 0 {
length := len(dagStats.DagStatsArray)
if length > 0 {
currentStat := dagStats.DagStatsArray[length-1]
fmt.Fprintf(os.Stderr, "CID: %s, Size: %d, NumBlocks: %d\n", currentStat.Cid, currentStat.Size, currentStat.NumBlocks)
// Ratio == 0 means this is a progress update (not final result)
if showProgress && dagStats.Ratio == 0 {
// Sum up total progress across all DAGs being scanned
var totalBlocks int64
var totalSize uint64
for _, stat := range dagStats.DagStatsArray {
totalBlocks += stat.NumBlocks
totalSize += stat.Size
}
fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks, %d bytes (%s)\r", totalBlocks, totalSize, humanize.Bytes(totalSize))
}
default:
return e.TypeErr(out, v)
}
}
// Clear the progress line before final output
if showProgress {
fmt.Fprint(os.Stderr, "\033[2K\r")
}
return re.Emit(dagStats)
}

View File

@ -1,7 +1,16 @@
package commands
import (
"encoding/hex"
"errors"
"fmt"
"io"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cmds "github.com/ipfs/go-ipfs-cmds"
oldcmds "github.com/ipfs/kubo/commands"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
)
var DiagCmd = &cmds.Command{
@ -10,8 +19,182 @@ var DiagCmd = &cmds.Command{
},
Subcommands: map[string]*cmds.Command{
"sys": sysDiagCmd,
"cmds": ActiveReqsCmd,
"profile": sysProfileCmd,
"sys": sysDiagCmd,
"cmds": ActiveReqsCmd,
"profile": sysProfileCmd,
"datastore": diagDatastoreCmd,
},
}
var diagDatastoreCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Low-level datastore inspection for debugging and testing.",
ShortDescription: `
'ipfs diag datastore' provides low-level access to the datastore for debugging
and testing purposes.
WARNING: FOR DEBUGGING/TESTING ONLY
These commands expose internal datastore details and should not be used
in production workflows. The datastore format may change between versions.
The daemon must not be running when calling these commands.
EXAMPLE
Inspecting pubsub seqno validator state:
$ ipfs diag datastore count /pubsub/seqno/
2
$ ipfs diag datastore get --hex /pubsub/seqno/12D3KooW...
Key: /pubsub/seqno/12D3KooW...
Hex Dump:
00000000 18 81 81 c8 91 c0 ea f6 |........|
`,
},
Subcommands: map[string]*cmds.Command{
"get": diagDatastoreGetCmd,
"count": diagDatastoreCountCmd,
},
}
const diagDatastoreHexOptionName = "hex"
type diagDatastoreGetResult struct {
Key string `json:"key"`
Value []byte `json:"value"`
HexDump string `json:"hex_dump,omitempty"`
}
var diagDatastoreGetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Read a raw key from the datastore.",
ShortDescription: `
Returns the value stored at the given datastore key.
Default output is raw bytes. Use --hex for human-readable hex dump.
The daemon must not be running when using this command.
WARNING: FOR DEBUGGING/TESTING ONLY
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, false, "Datastore key to read (e.g., /pubsub/seqno/<peerid>)"),
},
Options: []cmds.Option{
cmds.BoolOption(diagDatastoreHexOptionName, "Output hex dump instead of raw bytes"),
},
NoRemote: true,
PreRun: DaemonNotRunning,
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cctx := env.(*oldcmds.Context)
repo, err := fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return fmt.Errorf("failed to open repo: %w", err)
}
defer repo.Close()
keyStr := req.Arguments[0]
key := datastore.NewKey(keyStr)
ds := repo.Datastore()
val, err := ds.Get(req.Context, key)
if err != nil {
if errors.Is(err, datastore.ErrNotFound) {
return fmt.Errorf("key not found: %s", keyStr)
}
return fmt.Errorf("failed to read key: %w", err)
}
result := &diagDatastoreGetResult{
Key: keyStr,
Value: val,
}
if hexDump, _ := req.Options[diagDatastoreHexOptionName].(bool); hexDump {
result.HexDump = hex.Dump(val)
}
return cmds.EmitOnce(res, result)
},
Type: diagDatastoreGetResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *diagDatastoreGetResult) error {
if result.HexDump != "" {
fmt.Fprintf(w, "Key: %s\nHex Dump:\n%s", result.Key, result.HexDump)
return nil
}
// Raw bytes output
_, err := w.Write(result.Value)
return err
}),
},
}
type diagDatastoreCountResult struct {
Prefix string `json:"prefix"`
Count int64 `json:"count"`
}
var diagDatastoreCountCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Count entries matching a datastore prefix.",
ShortDescription: `
Counts the number of datastore entries whose keys start with the given prefix.
The daemon must not be running when using this command.
WARNING: FOR DEBUGGING/TESTING ONLY
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("prefix", true, false, "Datastore key prefix (e.g., /pubsub/seqno/)"),
},
NoRemote: true,
PreRun: DaemonNotRunning,
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cctx := env.(*oldcmds.Context)
repo, err := fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return fmt.Errorf("failed to open repo: %w", err)
}
defer repo.Close()
prefix := req.Arguments[0]
ds := repo.Datastore()
q := query.Query{
Prefix: prefix,
KeysOnly: true,
}
results, err := ds.Query(req.Context, q)
if err != nil {
return fmt.Errorf("failed to query datastore: %w", err)
}
defer results.Close()
var count int64
for result := range results.Next() {
if result.Error != nil {
return fmt.Errorf("query error: %w", result.Error)
}
count++
}
return cmds.EmitOnce(res, &diagDatastoreCountResult{
Prefix: prefix,
Count: count,
})
},
Type: diagDatastoreCountResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *diagDatastoreCountResult) error {
_, err := fmt.Fprintf(w, "%d\n", result.Count)
return err
}),
},
}

View File

@ -1,6 +1,8 @@
package commands
import cmds "github.com/ipfs/go-ipfs-cmds"
import (
cmds "github.com/ipfs/go-ipfs-cmds"
)
func CreateCmdExtras(opts ...func(e *cmds.Extra)) *cmds.Extra {
e := new(cmds.Extra)

View File

@ -16,18 +16,24 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
oldcmds "github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/node"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
bservice "github.com/ipfs/boxo/blockservice"
bstore "github.com/ipfs/boxo/blockstore"
offline "github.com/ipfs/boxo/exchange/offline"
dag "github.com/ipfs/boxo/ipld/merkledag"
ft "github.com/ipfs/boxo/ipld/unixfs"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
cid "github.com/ipfs/go-cid"
cidenc "github.com/ipfs/go-cidutil/cidenc"
"github.com/ipfs/go-datastore"
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
@ -120,18 +126,19 @@ performance.`,
cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true),
},
Subcommands: map[string]*cmds.Command{
"read": filesReadCmd,
"write": filesWriteCmd,
"mv": filesMvCmd,
"cp": filesCpCmd,
"ls": filesLsCmd,
"mkdir": filesMkdirCmd,
"stat": filesStatCmd,
"rm": filesRmCmd,
"flush": filesFlushCmd,
"chcid": filesChcidCmd,
"chmod": filesChmodCmd,
"touch": filesTouchCmd,
"read": filesReadCmd,
"write": filesWriteCmd,
"mv": filesMvCmd,
"cp": filesCpCmd,
"ls": filesLsCmd,
"mkdir": filesMkdirCmd,
"stat": filesStatCmd,
"rm": filesRmCmd,
"flush": filesFlushCmd,
"chcid": filesChcidCmd,
"chmod": filesChmodCmd,
"chroot": filesChrootCmd,
"touch": filesTouchCmd,
},
}
@ -493,7 +500,12 @@ being GC'ed.
return err
}
prefix, err := getPrefixNew(req)
cfg, err := nd.Repo.Config()
if err != nil {
return err
}
prefix, err := getPrefixNew(req, &cfg.Import)
if err != nil {
return err
}
@ -544,7 +556,9 @@ being GC'ed.
mkParents, _ := req.Options[filesParentsOptionName].(bool)
if mkParents {
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix)
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix, maxDirLinks, &sizeEstimationMode)
if err != nil {
return err
}
@ -983,9 +997,13 @@ stat' on the file or any of its ancestors.
WARNING:
The CID produced by 'files write' will be different from 'ipfs add' because
'ipfs file write' creates a trickle-dag optimized for append-only operations
'ipfs files write' creates a trickle-dag optimized for append-only operations.
See '--trickle' in 'ipfs add --help' for more information.
NOTE: The 'Import.UnixFSFileMaxLinks' config option does not apply to this command.
Trickle DAG has a fixed internal structure optimized for append operations.
To use configurable max-links, use 'ipfs add' with balanced DAG layout.
If you want to add a file without modifying an existing one,
use 'ipfs add' with '--to-files':
@ -1042,7 +1060,7 @@ See '--to-files' in 'ipfs add --help' for more information.
rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
}
prefix, err := getPrefixNew(req)
prefix, err := getPrefixNew(req, &cfg.Import)
if err != nil {
return err
}
@ -1053,7 +1071,9 @@ See '--to-files' in 'ipfs add --help' for more information.
}
if mkParents {
err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix)
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix, maxDirLinks, &sizeEstimationMode)
if err != nil {
return err
}
@ -1157,6 +1177,11 @@ Examples:
return err
}
cfg, err := n.Repo.Config()
if err != nil {
return err
}
dashp, _ := req.Options[filesParentsOptionName].(bool)
dirtomake, err := checkPath(req.Arguments[0])
if err != nil {
@ -1169,16 +1194,21 @@ Examples:
return err
}
prefix, err := getPrefix(req)
prefix, err := getPrefix(req, &cfg.Import)
if err != nil {
return err
}
root := n.FilesRoot
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{
Mkparents: dashp,
Flush: flush,
CidBuilder: prefix,
Mkparents: dashp,
Flush: flush,
CidBuilder: prefix,
MaxLinks: maxDirLinks,
SizeEstimationMode: &sizeEstimationMode,
})
return err
@ -1256,7 +1286,9 @@ Change the CID version or hash function of the root node of a given path.
flush, _ := req.Options[filesFlushOptionName].(bool)
prefix, err := getPrefix(req)
// Note: files chcid is for explicitly changing CID format, so we don't
// fall back to Import config here. If no options are provided, it does nothing.
prefix, err := getPrefix(req, nil)
if err != nil {
return err
}
@ -1414,10 +1446,20 @@ func removePath(filesRoot *mfs.Root, path string, force bool, dashr bool) error
return pdir.Flush()
}
func getPrefixNew(req *cmds.Request) (cid.Builder, error) {
func getPrefixNew(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) {
cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int)
hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string)
// Fall back to Import config if CLI options not set
if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() {
cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion))
cidVerSet = true
}
if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() {
hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction)
hashFunSet = true
}
if !cidVerSet && !hashFunSet {
return nil, nil
}
@ -1443,10 +1485,20 @@ func getPrefixNew(req *cmds.Request) (cid.Builder, error) {
return &prefix, nil
}
func getPrefix(req *cmds.Request) (cid.Builder, error) {
func getPrefix(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) {
cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int)
hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string)
// Fall back to Import config if CLI options not set
if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() {
cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion))
cidVerSet = true
}
if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() {
hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction)
hashFunSet = true
}
if !cidVerSet && !hashFunSet {
return nil, nil
}
@ -1472,7 +1524,7 @@ func getPrefix(req *cmds.Request) (cid.Builder, error) {
return &prefix, nil
}
func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder) error {
func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder, maxLinks int, sizeEstimationMode *uio.SizeEstimationMode) error {
dirtomake := gopath.Dir(path)
if dirtomake == "/" {
@ -1480,8 +1532,10 @@ func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Build
}
return mfs.Mkdir(r, dirtomake, mfs.MkdirOpts{
Mkparents: true,
CidBuilder: builder,
Mkparents: true,
CidBuilder: builder,
MaxLinks: maxLinks,
SizeEstimationMode: sizeEstimationMode,
})
}
@ -1648,3 +1702,141 @@ Examples:
return mfs.Touch(nd.FilesRoot, path, ts)
},
}
const chrootConfirmOptionName = "confirm"
var filesChrootCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Change the MFS root CID.",
ShortDescription: `
'ipfs files chroot' changes the root CID used by MFS (Mutable File System).
This is a recovery command for when MFS becomes corrupted and prevents the
daemon from starting.
When run without a CID argument, resets MFS to an empty directory.
WARNING: The old MFS root and its unpinned children will be removed during
the next garbage collection. Pin the old root first if you want to preserve it.
This command can only run when the daemon is not running.
Examples:
# Reset MFS to empty directory (recovery from corruption)
$ ipfs files chroot --confirm
# Restore MFS to a known good directory CID
$ ipfs files chroot --confirm QmYourBackupCID
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("cid", false, false, "New root CID (defaults to empty directory if not specified)."),
},
Options: []cmds.Option{
cmds.BoolOption(chrootConfirmOptionName, "Confirm this potentially destructive operation."),
},
NoRemote: true,
Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
confirm, _ := req.Options[chrootConfirmOptionName].(bool)
if !confirm {
return errors.New("this is a potentially destructive operation; pass --confirm to proceed")
}
// Determine new root CID
var newRootCid cid.Cid
if len(req.Arguments) > 0 {
var err error
newRootCid, err = cid.Decode(req.Arguments[0])
if err != nil {
return fmt.Errorf("invalid CID %q: %w", req.Arguments[0], err)
}
} else {
// Default to empty directory
newRootCid = ft.EmptyDirNode().Cid()
}
// Get config root to open repo directly
cctx := env.(*oldcmds.Context)
cfgRoot := cctx.ConfigRoot
// Open repo directly (daemon must not be running)
repo, err := fsrepo.Open(cfgRoot)
if err != nil {
return fmt.Errorf("opening repo (is the daemon running?): %w", err)
}
defer repo.Close()
localDS := repo.Datastore()
bs := bstore.NewBlockstore(localDS)
// Check new root exists locally and is a directory
hasBlock, err := bs.Has(req.Context, newRootCid)
if err != nil {
return fmt.Errorf("checking if new root exists: %w", err)
}
if !hasBlock {
// Special case: empty dir is always available (hardcoded in boxo)
emptyDirCid := ft.EmptyDirNode().Cid()
if !newRootCid.Equals(emptyDirCid) {
return fmt.Errorf("new root %s does not exist locally; fetch it first with 'ipfs block get'", newRootCid)
}
}
// Validate it's a directory (not a file)
if hasBlock {
blk, err := bs.Get(req.Context, newRootCid)
if err != nil {
return fmt.Errorf("reading new root block: %w", err)
}
pbNode, err := dag.DecodeProtobuf(blk.RawData())
if err != nil {
return fmt.Errorf("new root is not a valid dag-pb node: %w", err)
}
fsNode, err := ft.FSNodeFromBytes(pbNode.Data())
if err != nil {
return fmt.Errorf("new root is not a valid UnixFS node: %w", err)
}
if fsNode.Type() != ft.TDirectory && fsNode.Type() != ft.THAMTShard {
return fmt.Errorf("new root must be a directory, got %s", fsNode.Type())
}
}
// Get old root for display (if exists)
var oldRootStr string
oldRootBytes, err := localDS.Get(req.Context, node.FilesRootDatastoreKey)
if err == nil {
oldRootCid, err := cid.Cast(oldRootBytes)
if err == nil {
oldRootStr = oldRootCid.String()
}
} else if !errors.Is(err, datastore.ErrNotFound) {
return fmt.Errorf("reading current MFS root: %w", err)
}
// Write new root
err = localDS.Put(req.Context, node.FilesRootDatastoreKey, newRootCid.Bytes())
if err != nil {
return fmt.Errorf("writing new MFS root: %w", err)
}
// Build output message
var msg string
if oldRootStr != "" {
msg = fmt.Sprintf("MFS root changed from %s to %s\n", oldRootStr, newRootCid)
msg += fmt.Sprintf("The old root %s will be garbage collected unless pinned.\n", oldRootStr)
} else {
msg = fmt.Sprintf("MFS root set to %s\n", newRootCid)
}
return cmds.EmitOnce(res, &MessageOutput{Message: msg})
},
Type: MessageOutput{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error {
_, err := fmt.Fprint(w, out.Message)
return err
}),
},
}

View File

@ -45,6 +45,9 @@ To output a TAR archive instead of unpacked files, use '--archive' or '-a'.
To compress the output with GZIP compression, use '--compress' or '-C'. You
may also specify the level of compression by specifying '-l=<1-9>'.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/x-tar, or application/gzip when compress=true",
},
},
Arguments: []cmds.Argument{
@ -103,6 +106,16 @@ may also specify the level of compression by specifying '-l=<1-9>'.
reader.Close()
}()
// Set Content-Type based on output format.
// When compression is enabled, output is gzip (or tar.gz for directories).
// Otherwise, tar is used as the transport format.
res.SetEncodingType(cmds.OctetStream)
if cmplvl != gzip.NoCompression {
res.SetContentType("application/gzip")
} else {
res.SetContentType("application/x-tar")
}
return res.Emit(reader)
},
PostRun: cmds.PostRunMap{

View File

@ -38,9 +38,9 @@ publish'.
> ipfs key gen --type=rsa --size=2048 mykey
> ipfs name publish --key=mykey QmSomeHash
'ipfs key list' lists the available keys.
'ipfs key ls' lists the available keys.
> ipfs key list
> ipfs key ls
self
mykey
`,
@ -49,7 +49,8 @@ publish'.
"gen": keyGenCmd,
"export": keyExportCmd,
"import": keyImportCmd,
"list": keyListCmd,
"list": keyListDeprecatedCmd,
"ls": keyListCmd,
"rename": keyRenameCmd,
"rm": keyRmCmd,
"rotate": keyRotateCmd,
@ -458,7 +459,7 @@ var keyListCmd = &cmds.Command{
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
keyEnc, err := ke.KeyEncoderFromString(req.Options[ke.OptionIPNSBase.Name()].(string))
if err != nil {
return err
return fmt.Errorf("cannot get key encoder: %w", err)
}
api, err := cmdenv.GetApi(env, req)
@ -468,7 +469,7 @@ var keyListCmd = &cmds.Command{
keys, err := api.Key().List(req.Context)
if err != nil {
return err
return fmt.Errorf("listing keys failed: %w", err)
}
list := make([]KeyOutput, 0, len(keys))
@ -488,6 +489,17 @@ var keyListCmd = &cmds.Command{
Type: KeyOutputList{},
}
var keyListDeprecatedCmd = &cmds.Command{
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "Deprecated: use 'ipfs key ls' instead.",
},
Options: keyListCmd.Options,
Run: keyListCmd.Run,
Encoders: keyListCmd.Encoders,
Type: keyListCmd.Type,
}
const (
keyStoreForceOptionName = "force"
)
@ -773,7 +785,7 @@ the signed payload is always prefixed with "libp2p-key signed message:".
`,
},
Options: []cmds.Option{
cmds.StringOption("key", "k", "The name of the key to use for signing."),
cmds.StringOption("key", "k", "The name of the key to use for verifying."),
cmds.StringOption("signature", "s", "Multibase-encoded signature to verify."),
ke.OptionIPNSBase,
},

View File

@ -48,6 +48,7 @@ const (
lsResolveTypeOptionName = "resolve-type"
lsSizeOptionName = "size"
lsStreamOptionName = "stream"
lsLongOptionName = "long"
)
var LsCmd = &cmds.Command{
@ -57,7 +58,26 @@ var LsCmd = &cmds.Command{
Displays the contents of an IPFS or IPNS object(s) at the given path, with
the following format:
<link base58 hash> <link size in bytes> <link name>
<cid> <size> <name>
With the --long (-l) option, display optional file mode (permissions) and
modification time in a format similar to Unix 'ls -l':
<mode> <cid> <size> <mtime> <name>
Mode and mtime are optional UnixFS metadata. They are only present if the
content was imported with 'ipfs add --preserve-mode' and '--preserve-mtime'.
Without preserved metadata, both mode and mtime display '-'. Times are in UTC.
Example with --long and preserved metadata:
-rw-r--r-- QmZULkCELmmk5XNf... 1234 Jan 15 10:30 document.txt
-rwxr-xr-x QmaRGe7bVmVaLmxb... 5678 Dec 01 2023 script.sh
drwxr-xr-x QmWWEQhcLufF3qPm... - Nov 20 2023 subdir/
Example with --long without preserved metadata:
- QmZULkCELmmk5XNf... 1234 - document.txt
The JSON output contains type information.
`,
@ -71,6 +91,7 @@ The JSON output contains type information.
cmds.BoolOption(lsResolveTypeOptionName, "Resolve linked objects to find out their types.").WithDefault(true),
cmds.BoolOption(lsSizeOptionName, "Resolve linked objects to find out their file size.").WithDefault(true),
cmds.BoolOption(lsStreamOptionName, "s", "Enable experimental streaming of directory entries as they are traversed."),
cmds.BoolOption(lsLongOptionName, "l", "Use a long listing format, showing file mode and modification time."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
@ -215,10 +236,121 @@ The JSON output contains type information.
Type: LsOutput{},
}
// formatMode converts os.FileMode to a 10-character Unix ls-style string.
//
// Format: [type][owner rwx][group rwx][other rwx]
//
// Type indicators: - (regular), d (directory), l (symlink), p (named pipe),
// s (socket), c (char device), b (block device).
//
// Special bits replace the execute position: setuid on owner (s/S),
// setgid on group (s/S), sticky on other (t/T). Lowercase when the
// underlying execute bit is also set, uppercase when not.
func formatMode(mode os.FileMode) string {
var buf [10]byte
// File type - handle all special file types like ls does
switch {
case mode&os.ModeDir != 0:
buf[0] = 'd'
case mode&os.ModeSymlink != 0:
buf[0] = 'l'
case mode&os.ModeNamedPipe != 0:
buf[0] = 'p'
case mode&os.ModeSocket != 0:
buf[0] = 's'
case mode&os.ModeDevice != 0:
if mode&os.ModeCharDevice != 0 {
buf[0] = 'c'
} else {
buf[0] = 'b'
}
default:
buf[0] = '-'
}
// Owner permissions (bits 8,7,6)
buf[1] = permBit(mode, 0400, 'r') // read
buf[2] = permBit(mode, 0200, 'w') // write
// Handle setuid bit for owner execute
if mode&os.ModeSetuid != 0 {
if mode&0100 != 0 {
buf[3] = 's'
} else {
buf[3] = 'S'
}
} else {
buf[3] = permBit(mode, 0100, 'x') // execute
}
// Group permissions (bits 5,4,3)
buf[4] = permBit(mode, 0040, 'r') // read
buf[5] = permBit(mode, 0020, 'w') // write
// Handle setgid bit for group execute
if mode&os.ModeSetgid != 0 {
if mode&0010 != 0 {
buf[6] = 's'
} else {
buf[6] = 'S'
}
} else {
buf[6] = permBit(mode, 0010, 'x') // execute
}
// Other permissions (bits 2,1,0)
buf[7] = permBit(mode, 0004, 'r') // read
buf[8] = permBit(mode, 0002, 'w') // write
// Handle sticky bit for other execute
if mode&os.ModeSticky != 0 {
if mode&0001 != 0 {
buf[9] = 't'
} else {
buf[9] = 'T'
}
} else {
buf[9] = permBit(mode, 0001, 'x') // execute
}
return string(buf[:])
}
// permBit returns the permission character if the bit is set.
func permBit(mode os.FileMode, bit os.FileMode, char byte) byte {
if mode&bit != 0 {
return char
}
return '-'
}
// formatModTime formats time.Time for display, following Unix ls conventions.
//
// Returns "-" for zero time. Otherwise returns a 12-character string:
// recent files (within 6 months) show "Jan 02 15:04",
// older or future files show "Jan 02 2006".
//
// The output uses the timezone embedded in t (UTC for IPFS metadata).
func formatModTime(t time.Time) string {
if t.IsZero() {
return "-"
}
// Format: "Jan 02 15:04" for times within the last 6 months
// Format: "Jan 02 2006" for older times (similar to ls)
now := time.Now()
sixMonthsAgo := now.AddDate(0, -6, 0)
if t.After(sixMonthsAgo) && t.Before(now.Add(24*time.Hour)) {
return t.Format("Jan 02 15:04")
}
return t.Format("Jan 02 2006")
}
func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash string, ignoreBreaks bool) string {
headers, _ := req.Options[lsHeadersOptionNameTime].(bool)
stream, _ := req.Options[lsStreamOptionName].(bool)
size, _ := req.Options[lsSizeOptionName].(bool)
long, _ := req.Options[lsLongOptionName].(bool)
// in streaming mode we can't automatically align the tabs
// so we take a best guess
var minTabWidth int
@ -242,9 +374,21 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash
fmt.Fprintf(tw, "%s:\n", object.Hash)
}
if headers {
s := "Hash\tName"
if size {
s = "Hash\tSize\tName"
var s string
if long {
// Long format: Mode Hash [Size] ModTime Name
if size {
s = "Mode\tHash\tSize\tModTime\tName"
} else {
s = "Mode\tHash\tModTime\tName"
}
} else {
// Standard format: Hash [Size] Name
if size {
s = "Hash\tSize\tName"
} else {
s = "Hash\tName"
}
}
fmt.Fprintln(tw, s)
}
@ -253,23 +397,54 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash
for _, link := range object.Links {
var s string
switch link.Type {
case unixfs.TDirectory, unixfs.THAMTShard, unixfs.TMetadata:
if size {
s = "%[1]s\t-\t%[3]s/\n"
} else {
s = "%[1]s\t%[3]s/\n"
}
default:
if size {
s = "%s\t%v\t%s\n"
} else {
s = "%[1]s\t%[3]s\n"
}
}
isDir := link.Type == unixfs.TDirectory || link.Type == unixfs.THAMTShard || link.Type == unixfs.TMetadata
// TODO: Print link.Mode and link.ModTime?
fmt.Fprintf(tw, s, link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
if long {
// Long format: Mode Hash Size ModTime Name
var mode string
if link.Mode == 0 {
// No mode metadata preserved. Show "-" to indicate
// "not available" rather than "----------" (mode 0000).
mode = "-"
} else {
mode = formatMode(link.Mode)
}
modTime := formatModTime(link.ModTime)
if isDir {
if size {
s = "%s\t%s\t-\t%s\t%s/\n"
} else {
s = "%s\t%s\t%s\t%s/\n"
}
fmt.Fprintf(tw, s, mode, link.Hash, modTime, cmdenv.EscNonPrint(link.Name))
} else {
if size {
s = "%s\t%s\t%v\t%s\t%s\n"
fmt.Fprintf(tw, s, mode, link.Hash, link.Size, modTime, cmdenv.EscNonPrint(link.Name))
} else {
s = "%s\t%s\t%s\t%s\n"
fmt.Fprintf(tw, s, mode, link.Hash, modTime, cmdenv.EscNonPrint(link.Name))
}
}
} else {
// Standard format: Hash [Size] Name
switch {
case isDir:
if size {
s = "%[1]s\t-\t%[3]s/\n"
} else {
s = "%[1]s\t%[3]s/\n"
}
default:
if size {
s = "%s\t%v\t%s\n"
} else {
s = "%[1]s\t%[3]s\n"
}
}
fmt.Fprintf(tw, s, link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
}
}
}
tw.Flush()

189
core/commands/ls_test.go Normal file
View File

@ -0,0 +1,189 @@
package commands
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestFormatMode(t *testing.T) {
t.Parallel()
tests := []struct {
name string
mode os.FileMode
expected string
}{
// File types
{
name: "regular file with rw-r--r--",
mode: 0644,
expected: "-rw-r--r--",
},
{
name: "regular file with rwxr-xr-x",
mode: 0755,
expected: "-rwxr-xr-x",
},
{
name: "regular file with no permissions",
mode: 0,
expected: "----------",
},
{
name: "regular file with full permissions",
mode: 0777,
expected: "-rwxrwxrwx",
},
{
name: "directory with rwxr-xr-x",
mode: os.ModeDir | 0755,
expected: "drwxr-xr-x",
},
{
name: "directory with rwx------",
mode: os.ModeDir | 0700,
expected: "drwx------",
},
{
name: "symlink with rwxrwxrwx",
mode: os.ModeSymlink | 0777,
expected: "lrwxrwxrwx",
},
{
name: "named pipe with rw-r--r--",
mode: os.ModeNamedPipe | 0644,
expected: "prw-r--r--",
},
{
name: "socket with rw-rw-rw-",
mode: os.ModeSocket | 0666,
expected: "srw-rw-rw-",
},
{
name: "block device with rw-rw----",
mode: os.ModeDevice | 0660,
expected: "brw-rw----",
},
{
name: "character device with rw-rw-rw-",
mode: os.ModeDevice | os.ModeCharDevice | 0666,
expected: "crw-rw-rw-",
},
// Special permission bits - setuid
{
name: "setuid with execute",
mode: os.ModeSetuid | 0755,
expected: "-rwsr-xr-x",
},
{
name: "setuid without execute",
mode: os.ModeSetuid | 0644,
expected: "-rwSr--r--",
},
// Special permission bits - setgid
{
name: "setgid with execute",
mode: os.ModeSetgid | 0755,
expected: "-rwxr-sr-x",
},
{
name: "setgid without execute",
mode: os.ModeSetgid | 0745,
expected: "-rwxr-Sr-x",
},
// Special permission bits - sticky
{
name: "sticky with execute",
mode: os.ModeSticky | 0755,
expected: "-rwxr-xr-t",
},
{
name: "sticky without execute",
mode: os.ModeSticky | 0754,
expected: "-rwxr-xr-T",
},
// Combined special bits
{
name: "setuid + setgid + sticky all with execute",
mode: os.ModeSetuid | os.ModeSetgid | os.ModeSticky | 0777,
expected: "-rwsrwsrwt",
},
{
name: "setuid + setgid + sticky none with execute",
mode: os.ModeSetuid | os.ModeSetgid | os.ModeSticky | 0666,
expected: "-rwSrwSrwT",
},
// Directory with special bits
{
name: "directory with sticky bit",
mode: os.ModeDir | os.ModeSticky | 0755,
expected: "drwxr-xr-t",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
result := formatMode(tc.mode)
assert.Equal(t, tc.expected, result)
})
}
}
func TestFormatModTime(t *testing.T) {
t.Parallel()
t.Run("zero time returns dash", func(t *testing.T) {
t.Parallel()
result := formatModTime(time.Time{})
assert.Equal(t, "-", result)
})
t.Run("old time shows year format", func(t *testing.T) {
t.Parallel()
// Use a time clearly in the past (more than 6 months ago)
oldTime := time.Date(2020, time.March, 15, 10, 30, 0, 0, time.UTC)
result := formatModTime(oldTime)
// Format: "Jan 02 2006" (note: two spaces before year)
assert.Equal(t, "Mar 15 2020", result)
})
t.Run("very old time shows year format", func(t *testing.T) {
t.Parallel()
veryOldTime := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
result := formatModTime(veryOldTime)
assert.Equal(t, "Jan 01 2000", result)
})
t.Run("future time shows year format", func(t *testing.T) {
t.Parallel()
// Times more than 24h in the future should show year format
futureTime := time.Now().AddDate(1, 0, 0)
result := formatModTime(futureTime)
// Should contain the future year
assert.Contains(t, result, "  ") // two spaces before year
assert.Regexp(t, `^[A-Z][a-z]{2} \d{2}  \d{4}$`, result) // matches "Mon DD  YYYY"
assert.Contains(t, result, futureTime.Format("2006")) // contains the year
})
t.Run("format lengths are consistent", func(t *testing.T) {
t.Parallel()
// Both formats should produce 12-character strings for alignment
oldTime := time.Date(2020, time.March, 15, 10, 30, 0, 0, time.UTC)
oldResult := formatModTime(oldTime)
assert.Len(t, oldResult, 12, "old time format should be 12 chars")
// Recent time: use 1 month ago to ensure it's always within the 6-month window
recentTime := time.Now().AddDate(0, -1, 0)
recentResult := formatModTime(recentTime)
assert.Len(t, recentResult, 12, "recent time format should be 12 chars")
})
}

View File

@ -3,15 +3,18 @@ package name
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
"github.com/ipfs/boxo/ipns"
ipns_pb "github.com/ipfs/boxo/ipns/pb"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/coreiface/options"
"google.golang.org/protobuf/proto"
)
@ -42,29 +45,30 @@ Examples:
Publish an <ipfs-path> with your default name:
> ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
> ipfs name publish /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
Published to k51qzi5uqu5dgklc20hksmmzhoy5lfrn5xcnryq6xp4r50b5yc0vnivpywfu9p: /ipfs/bafk...
Publish an <ipfs-path> with another name, added by an 'ipfs key' command:
> ipfs key gen --type=rsa --size=2048 mykey
> ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
> ipfs key gen --type=ed25519 mykey
k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
> ipfs name publish --key=mykey /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
Published to k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr: /ipfs/bafk...
Resolve the value of your name:
> ipfs name resolve
/ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
/ipfs/bafk...
Resolve the value of another name:
> ipfs name resolve QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
/ipfs/QmSiTko9JZyabH56y2fussEt1A5oDqsFXB3CkvAqraFryz
> ipfs name resolve k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
/ipfs/bafk...
Resolve the value of a dnslink:
> ipfs name resolve ipfs.io
/ipfs/QmaBvfZooxWkrv7D3r8LS9moNjzD2o525XMZze69hhoxf5
> ipfs name resolve specs.ipfs.tech
/ipfs/bafy...
`,
},
@ -74,6 +78,8 @@ Resolve the value of a dnslink:
"resolve": IpnsCmd,
"pubsub": IpnsPubsubCmd,
"inspect": IpnsInspectCmd,
"get": IpnsGetCmd,
"put": IpnsPutCmd,
},
}
@ -123,6 +129,9 @@ in Multibase. The Data field is DAG-CBOR represented as DAG-JSON.
Passing --verify will verify signature against provided public key.
`,
HTTP: &cmds.HTTPHelpText{
Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
},
},
Arguments: []cmds.Argument{
cmds.FileArg("record", true, false, "The IPNS record payload to be verified.").EnableStdin(),
@ -267,3 +276,266 @@ Passing --verify will verify signature against provided public key.
}),
},
}
var IpnsGetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Retrieve a signed IPNS record.",
ShortDescription: `
Retrieves the signed IPNS record for a given name from the routing system.
The output is the raw IPNS record (protobuf) as defined in the IPNS spec:
https://specs.ipfs.tech/ipns/ipns-record/
The record can be inspected with 'ipfs name inspect':
ipfs name get <name> | ipfs name inspect
This is equivalent to 'ipfs routing get /ipns/<name>' but only accepts
IPNS names (not arbitrary routing keys).
Note: The routing system returns the "best" IPNS record it knows about.
For IPNS, "best" means the record with the highest sequence number.
If multiple records exist (e.g., after using 'ipfs name put'), this command
returns the one the routing system considers most current.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipfs.ipns-record",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("name", true, false, "The IPNS name to look up."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
}
// Normalize the argument: accept both "k51..." and "/ipns/k51..."
name := req.Arguments[0]
if !strings.HasPrefix(name, "/ipns/") {
name = "/ipns/" + name
}
data, err := api.Routing().Get(req.Context, name)
if err != nil {
return err
}
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipfs.ipns-record")
return res.Emit(bytes.NewReader(data))
},
}
const (
forceOptionName = "force"
putAllowOfflineOption = "allow-offline"
allowDelegatedOption = "allow-delegated"
maxIPNSRecordSize = 10 << 10 // 10 KiB per IPNS spec
)
var errPutAllowOffline = errors.New("can't put while offline: pass `--allow-offline` to store locally or `--allow-delegated` if Ipns.DelegatedPublishers are set up")
var IpnsPutCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Store a pre-signed IPNS record in the routing system.",
ShortDescription: `
Stores a pre-signed IPNS record in the routing system.
This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
https://specs.ipfs.tech/ipns/ipns-record/
The record must be signed by the private key corresponding to the IPNS name.
Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine them.
`,
LongDescription: `
Stores a pre-signed IPNS record in the routing system.
This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
https://specs.ipfs.tech/ipns/ipns-record/
The record must be signed by the private key corresponding to the IPNS name.
Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine them.
Use Cases:
- Re-publishing third-party records: store someone else's signed record
- Cross-node sync: import records exported from another node
- Backup/restore: export with 'name get', restore with 'name put'
Validation:
By default, the command validates that:
- The record is a valid IPNS record (protobuf)
- The record size is within 10 KiB limit
- The signature matches the provided IPNS name
- The record's sequence number is higher than any existing record
The --force flag skips this command's validation and passes the record
directly to the routing system. Note that --force only affects this command;
it does not control how the routing system handles the record. The routing
system may still reject invalid records or prefer records with higher sequence
numbers. Use --force primarily for testing (e.g., to observe how the routing
system reacts to incorrectly signed or malformed records).
Important: Even after a successful 'name put', a subsequent 'name get' may
return a different record if one with a higher sequence number exists.
This is expected IPNS behavior, not a bug.
Publishing Modes:
By default, IPNS records are published to both the DHT and any configured
HTTP delegated publishers. You can control this behavior with:
--allow-offline Store locally without requiring network connectivity
--allow-delegated Publish via HTTP delegated publishers only (no DHT)
Examples:
Export and re-import a record:
> ipfs name get k51... > record.bin
> ipfs name put k51... record.bin
Store a record received from someone else:
> ipfs name put k51... third-party-record.bin
Force store a record to test routing validation:
> ipfs name put --force k51... possibly-invalid-record.bin
`,
HTTP: &cmds.HTTPHelpText{
Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("name", true, false, "The IPNS name to store the record for (e.g., k51... or /ipns/k51...)."),
cmds.FileArg("record", true, false, "Path to file containing the signed IPNS record.").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(forceOptionName, "f", "Skip validation (signature, sequence, size)."),
cmds.BoolOption(putAllowOfflineOption, "Store locally without broadcasting to the network."),
cmds.BoolOption(allowDelegatedOption, "Publish via HTTP delegated publishers only (no DHT)."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
}
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
}
// Parse options
force, _ := req.Options[forceOptionName].(bool)
allowOffline, _ := req.Options[putAllowOfflineOption].(bool)
allowDelegated, _ := req.Options[allowDelegatedOption].(bool)
// Validate flag combinations
if allowOffline && allowDelegated {
return errors.New("cannot use both --allow-offline and --allow-delegated flags")
}
// Handle different publishing modes
if allowDelegated {
// AllowDelegated mode: check if delegated publishers are configured
cfg, err := nd.Repo.Config()
if err != nil {
return fmt.Errorf("failed to read config: %w", err)
}
delegatedPublishers := cfg.DelegatedPublishersWithAutoConf()
if len(delegatedPublishers) == 0 {
return errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing")
}
// For allow-delegated mode, we proceed even if offline
// since we're using HTTP publishing via delegated publishers
}
// Parse the IPNS name argument
nameArg := req.Arguments[0]
if !strings.HasPrefix(nameArg, "/ipns/") {
nameArg = "/ipns/" + nameArg
}
// Extract the name part after /ipns/
namePart := strings.TrimPrefix(nameArg, "/ipns/")
name, err := ipns.NameFromString(namePart)
if err != nil {
return fmt.Errorf("invalid IPNS name: %w", err)
}
// Read raw record bytes from file/stdin
file, err := cmdenv.GetFileArg(req.Files.Entries())
if err != nil {
return err
}
defer file.Close()
// Read record data (limit to 1 MiB for memory safety)
data, err := io.ReadAll(io.LimitReader(file, 1<<20))
if err != nil {
return fmt.Errorf("failed to read record: %w", err)
}
if len(data) == 0 {
return errors.New("record is empty")
}
// Validate unless --force
if !force {
// Check size limit per IPNS spec
if len(data) > maxIPNSRecordSize {
return fmt.Errorf("record exceeds maximum size of %d bytes, use --force to skip size check", maxIPNSRecordSize)
}
rec, err := ipns.UnmarshalRecord(data)
if err != nil {
return fmt.Errorf("invalid IPNS record: %w", err)
}
// Validate signature against provided name
err = ipns.ValidateWithName(rec, name)
if err != nil {
return fmt.Errorf("record validation failed: %w", err)
}
// Check for sequence conflicts with existing record
existingData, err := api.Routing().Get(req.Context, nameArg)
if err == nil {
// We have an existing record, check sequence
existingRec, parseErr := ipns.UnmarshalRecord(existingData)
if parseErr == nil {
existingSeq, seqErr := existingRec.Sequence()
newSeq, newSeqErr := rec.Sequence()
if seqErr == nil && newSeqErr == nil {
if existingSeq >= newSeq {
return fmt.Errorf("existing record has sequence %d >= new record sequence %d, use --force to overwrite", existingSeq, newSeq)
}
}
}
}
// If Get fails (no existing record), that's fine - proceed with put
}
// Publish the original bytes as-is
// When allowDelegated is true, we set allowOffline to allow the operation
// even without DHT connectivity (delegated publishers use HTTP)
opts := []options.RoutingPutOption{
options.Routing.AllowOffline(allowOffline || allowDelegated),
}
err = api.Routing().Put(req.Context, nameArg, data, opts...)
if err != nil {
if err.Error() == "can't put while offline" {
return errPutAllowOffline
}
return err
}
return nil
},
}
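The validation path above (UnmarshalRecord → ValidateWithName → Sequence) can also be run client-side before handing a record to 'ipfs name put'. Below is a minimal sketch using only the boxo calls already referenced in this command; the helper name and CLI shape are illustrative, not part of this change.

```go
// Illustrative only: a standalone pre-check for a record file, mirroring the
// validation that 'ipfs name put' performs. Not part of this change.
package main

import (
	"fmt"
	"os"

	"github.com/ipfs/boxo/ipns"
)

func main() {
	// Hypothetical usage: precheck <ipns-name> <record-file>
	name, err := ipns.NameFromString(os.Args[1])
	if err != nil {
		panic(err)
	}
	data, err := os.ReadFile(os.Args[2])
	if err != nil {
		panic(err)
	}
	rec, err := ipns.UnmarshalRecord(data)
	if err != nil {
		panic(err)
	}
	if err := ipns.ValidateWithName(rec, name); err != nil {
		panic(err)
	}
	seq, _ := rec.Sequence()
	fmt.Printf("record for %s is valid, sequence %d\n", name, seq)
}
```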

View File

@ -50,9 +50,17 @@ type P2PStreamsOutput struct {
Streams []P2PStreamInfoOutput
}
// P2PForegroundOutput is output type for foreground mode status messages
type P2PForegroundOutput struct {
Status string // "active" or "closing"
Protocol string
Address string
}
const (
allowCustomProtocolOptionName = "allow-custom-protocol"
reportPeerIDOptionName = "report-peer-id"
foregroundOptionName = "foreground"
)
var resolveTimeout = 10 * time.Second
@ -83,15 +91,37 @@ var p2pForwardCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Forward connections to libp2p service.",
ShortDescription: `
Forward connections made to <listen-address> to <target-address>.
Forward connections made to <listen-address> to <target-address> via libp2p.
<protocol> specifies the libp2p protocol name to use for libp2p
connections and/or handlers. It must be prefixed with '` + P2PProtoPrefix + `'.
Creates a local TCP listener that tunnels connections through libp2p to a
remote peer's p2p listener. Similar to SSH port forwarding (-L flag).
Example:
ipfs p2p forward ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/4567 /p2p/QmPeer
- Forward connections to 127.0.0.1:4567 to '` + P2PProtoPrefix + `myproto' service on /p2p/QmPeer
ARGUMENTS:
<protocol> Protocol name (must start with '` + P2PProtoPrefix + `')
<listen-address> Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000)
<target-address> Remote peer multiaddr (e.g., /p2p/PeerID)
FOREGROUND MODE (--foreground, -f):
By default, the forwarder runs in the daemon and the command returns
immediately. Use --foreground to block until interrupted:
- Ctrl+C or SIGTERM: Removes the forwarder and exits
- 'ipfs p2p close': Removes the forwarder and exits
- Daemon shutdown: Forwarder is automatically removed
Useful for systemd services or scripts that need cleanup on exit.
EXAMPLES:
# Persistent forwarder (command returns immediately)
ipfs p2p forward /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID
# Temporary forwarder (removed when command exits)
ipfs p2p forward -f /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID
Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
`,
},
Arguments: []cmds.Argument{
@ -101,6 +131,7 @@ Example:
},
Options: []cmds.Option{
cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"),
cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; forwarder is removed when command exits"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := p2pGetNode(env)
@ -130,7 +161,51 @@ Example:
return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace")
}
return forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets)
listener, err := forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets)
if err != nil {
return err
}
foreground, _ := req.Options[foregroundOptionName].(bool)
if foreground {
if err := res.Emit(&P2PForegroundOutput{
Status: "active",
Protocol: protoOpt,
Address: listenOpt,
}); err != nil {
return err
}
// Wait for either context cancellation (Ctrl+C/daemon shutdown)
// or listener removal (ipfs p2p close)
select {
case <-req.Context.Done():
// SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing)
n.P2P.ListenersLocal.Close(func(l p2p.Listener) bool {
return l == listener
})
return nil
case <-listener.Done():
// Closed via "ipfs p2p close" - emit closing message
return res.Emit(&P2PForegroundOutput{
Status: "closing",
Protocol: protoOpt,
Address: listenOpt,
})
}
}
return nil
},
Type: P2PForegroundOutput{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error {
if out.Status == "active" {
fmt.Fprintf(w, "Forwarding %s to %s, waiting for interrupt...\n", out.Protocol, out.Address)
} else if out.Status == "closing" {
fmt.Fprintf(w, "Received interrupt, removing forwarder for %s\n", out.Protocol)
}
return nil
}),
},
}
@ -185,14 +260,40 @@ var p2pListenCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Create libp2p service.",
ShortDescription: `
Create libp2p service and forward connections made to <target-address>.
Create a libp2p protocol handler that forwards incoming connections to
<target-address>.
<protocol> specifies the libp2p handler name. It must be prefixed with '` + P2PProtoPrefix + `'.
When a remote peer connects using 'ipfs p2p forward', the connection is
forwarded to your local service. Similar to SSH port forwarding (server side).
Example:
ipfs p2p listen ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/1234
- Forward connections to 'myproto' libp2p service to 127.0.0.1:1234
ARGUMENTS:
<protocol> Protocol name (must start with '` + P2PProtoPrefix + `')
<target-address> Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000)
FOREGROUND MODE (--foreground, -f):
By default, the listener runs in the daemon and the command returns
immediately. Use --foreground to block until interrupted:
- Ctrl+C or SIGTERM: Removes the listener and exits
- 'ipfs p2p close': Removes the listener and exits
- Daemon shutdown: Listener is automatically removed
Useful for systemd services or scripts that need cleanup on exit.
EXAMPLES:
# Persistent listener (command returns immediately)
ipfs p2p listen /x/myapp /ip4/127.0.0.1/tcp/3000
# Temporary listener (removed when command exits)
ipfs p2p listen -f /x/myapp /ip4/127.0.0.1/tcp/3000
# Report connecting peer ID to the target application
ipfs p2p listen -r /x/myapp /ip4/127.0.0.1/tcp/3000
Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
`,
},
Arguments: []cmds.Argument{
@ -202,6 +303,7 @@ Example:
Options: []cmds.Option{
cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"),
cmds.BoolOption(reportPeerIDOptionName, "r", "Send remote base58 peerid to target when a new connection is established"),
cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; listener is removed when command exits"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := p2pGetNode(env)
@ -231,8 +333,51 @@ Example:
return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace")
}
_, err = n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID)
return err
listener, err := n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID)
if err != nil {
return err
}
foreground, _ := req.Options[foregroundOptionName].(bool)
if foreground {
if err := res.Emit(&P2PForegroundOutput{
Status: "active",
Protocol: protoOpt,
Address: targetOpt,
}); err != nil {
return err
}
// Wait for either context cancellation (Ctrl+C/daemon shutdown)
// or listener removal (ipfs p2p close)
select {
case <-req.Context.Done():
// SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing)
n.P2P.ListenersP2P.Close(func(l p2p.Listener) bool {
return l == listener
})
return nil
case <-listener.Done():
// Closed via "ipfs p2p close" - emit closing message
return res.Emit(&P2PForegroundOutput{
Status: "closing",
Protocol: protoOpt,
Address: targetOpt,
})
}
}
return nil
},
Type: P2PForegroundOutput{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error {
if out.Status == "active" {
fmt.Fprintf(w, "Listening on %s, forwarding to %s, waiting for interrupt...\n", out.Protocol, out.Address)
} else if out.Status == "closing" {
fmt.Fprintf(w, "Received interrupt, removing listener for %s\n", out.Protocol)
}
return nil
}),
},
}
@ -271,11 +416,9 @@ func checkPort(target ma.Multiaddr) error {
}
// forwardLocal forwards local connections to a libp2p service
func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) error {
func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) (p2p.Listener, error) {
ps.AddAddrs(addr.ID, addr.Addrs, pstore.TempAddrTTL)
// TODO: return some info
_, err := p.ForwardLocal(ctx, addr.ID, proto, bindAddr)
return err
return p.ForwardLocal(ctx, addr.ID, proto, bindAddr)
}
const (

View File

@ -70,6 +70,9 @@ However, it could reveal:
- Memory offsets of various data structures.
- Any modifications you've made to go-ipfs.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/zip",
},
},
NoLocal: true,
Options: []cmds.Option{
@ -121,6 +124,8 @@ However, it could reveal:
archive.Close()
_ = w.CloseWithError(err)
}()
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/zip")
return res.Emit(r)
},
PostRun: cmds.PostRunMap{

View File

@ -8,26 +8,35 @@ import (
"net/http"
"slices"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
mbase "github.com/multiformats/go-multibase"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
options "github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/core/node/libp2p"
"github.com/libp2p/go-libp2p/core/peer"
mbase "github.com/multiformats/go-multibase"
)
var PubsubCmd = &cmds.Command{
Status: cmds.Deprecated,
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "An experimental publish-subscribe system on ipfs.",
ShortDescription: `
ipfs pubsub allows you to publish messages to a given topic, and also to
subscribe to new messages on a given topic.
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
EXPERIMENTAL FEATURE
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
The default message validator is designed for IPNS record protocol.
For custom pubsub applications requiring different validation logic,
use go-libp2p-pubsub (https://github.com/libp2p/go-libp2p-pubsub)
directly in a dedicated binary.
To enable, set 'Pubsub.Enabled' config to true.
`,
},
Subcommands: map[string]*cmds.Command{
@ -35,6 +44,7 @@ DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
"sub": PubsubSubCmd,
"ls": PubsubLsCmd,
"peers": PubsubPeersCmd,
"reset": PubsubResetCmd,
},
}
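For the custom-validation use case the helptext above defers to go-libp2p-pubsub. A minimal dedicated binary might look roughly like this; the topic name and payload are arbitrary examples, not part of this change.

```go
// Illustrative only: a minimal dedicated binary using go-libp2p-pubsub
// directly, as suggested above for applications that need their own
// validation logic.
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
	ctx := context.Background()
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	// Custom validators can be attached via pubsub options here.
	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}
	topic, err := ps.Join("my-app-topic")
	if err != nil {
		panic(err)
	}
	sub, err := topic.Subscribe()
	if err != nil {
		panic(err)
	}
	if err := topic.Publish(ctx, []byte("hello")); err != nil {
		panic(err)
	}
	msg, err := sub.Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %q from %s\n", msg.Data, msg.ReceivedFrom)
}
```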
@ -46,17 +56,18 @@ type pubsubMessage struct {
}
var PubsubSubCmd = &cmds.Command{
Status: cmds.Deprecated,
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Subscribe to messages on a given topic.",
ShortDescription: `
ipfs pubsub sub subscribes to messages on a given topic.
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
EXPERIMENTAL FEATURE
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
PEER ENCODING
@ -145,18 +156,19 @@ TOPIC AND DATA ENCODING
}
var PubsubPubCmd = &cmds.Command{
Status: cmds.Deprecated,
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Publish data to a given pubsub topic.",
ShortDescription: `
ipfs pubsub pub publishes a message to a specified topic.
It reads binary data from stdin or a file.
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
EXPERIMENTAL FEATURE
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
HTTP RPC ENCODING
@ -201,17 +213,18 @@ HTTP RPC ENCODING
}
var PubsubLsCmd = &cmds.Command{
Status: cmds.Deprecated,
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "List subscribed topics by name.",
ShortDescription: `
ipfs pubsub ls lists out the names of topics you are currently subscribed to.
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
EXPERIMENTAL FEATURE
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
TOPIC ENCODING
@ -273,7 +286,7 @@ func safeTextListEncoder(req *cmds.Request, w io.Writer, list *stringList) error
}
var PubsubPeersCmd = &cmds.Command{
Status: cmds.Deprecated,
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "List peers we are currently pubsubbing with.",
ShortDescription: `
@ -281,11 +294,12 @@ ipfs pubsub peers with no arguments lists out the pubsub peers you are
currently connected to. If given a topic, it will list connected peers who are
subscribed to the named topic.
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
EXPERIMENTAL FEATURE
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
TOPIC AND DATA ENCODING
@ -367,3 +381,122 @@ func urlArgsDecoder(req *cmds.Request, env cmds.Environment) error {
}
return nil
}
type pubsubResetResult struct {
Deleted int64 `json:"deleted"`
}
var PubsubResetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Reset pubsub validator state.",
ShortDescription: `
Clears persistent sequence number state used by the pubsub validator.
WARNING: FOR TESTING ONLY - DO NOT USE IN PRODUCTION
Resets validator state that protects against replay attacks. After reset,
previously seen messages may be accepted again until their sequence numbers
are re-learned.
Use cases:
- Testing pubsub functionality
- Recovery from a peer sending artificially high sequence numbers
(which would cause subsequent messages from that peer to be rejected)
The --peer flag limits the reset to a specific peer's state.
Without --peer, all validator state is cleared.
NOTE: This only resets the persistent seqno validator state. The in-memory
seen messages cache (Pubsub.SeenMessagesTTL) auto-expires and can only be
fully cleared by restarting the daemon.
`,
},
Options: []cmds.Option{
cmds.StringOption(peerOptionName, "p", "Only reset state for this peer ID"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}
ds := n.Repo.Datastore()
ctx := req.Context
peerOpt, _ := req.Options[peerOptionName].(string)
var deleted int64
if peerOpt != "" {
// Reset specific peer
pid, err := peer.Decode(peerOpt)
if err != nil {
return fmt.Errorf("invalid peer ID: %w", err)
}
key := datastore.NewKey(libp2p.SeqnoStorePrefix + pid.String())
exists, err := ds.Has(ctx, key)
if err != nil {
return fmt.Errorf("failed to check seqno state: %w", err)
}
if exists {
if err := ds.Delete(ctx, key); err != nil {
return fmt.Errorf("failed to delete seqno state: %w", err)
}
deleted = 1
}
} else {
// Reset all peers using batched delete for efficiency
q := query.Query{
Prefix: libp2p.SeqnoStorePrefix,
KeysOnly: true,
}
results, err := ds.Query(ctx, q)
if err != nil {
return fmt.Errorf("failed to query seqno state: %w", err)
}
defer results.Close()
batch, err := ds.Batch(ctx)
if err != nil {
return fmt.Errorf("failed to create batch: %w", err)
}
for result := range results.Next() {
if result.Error != nil {
return fmt.Errorf("query error: %w", result.Error)
}
if err := batch.Delete(ctx, datastore.NewKey(result.Key)); err != nil {
return fmt.Errorf("failed to batch delete key %s: %w", result.Key, err)
}
deleted++
}
if err := batch.Commit(ctx); err != nil {
return fmt.Errorf("failed to commit batch delete: %w", err)
}
}
// Sync to ensure deletions are persisted
if err := ds.Sync(ctx, datastore.NewKey(libp2p.SeqnoStorePrefix)); err != nil {
return fmt.Errorf("failed to sync datastore: %w", err)
}
return cmds.EmitOnce(res, &pubsubResetResult{Deleted: deleted})
},
Type: pubsubResetResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *pubsubResetResult) error {
peerOpt, _ := req.Options[peerOptionName].(string)
if peerOpt != "" {
if result.Deleted == 0 {
_, err := fmt.Fprintf(w, "No validator state found for peer %s\n", peerOpt)
return err
}
_, err := fmt.Fprintf(w, "Reset validator state for peer %s\n", peerOpt)
return err
}
_, err := fmt.Fprintf(w, "Reset validator state for %d peer(s)\n", result.Deleted)
return err
}),
},
}

View File

@ -11,11 +11,13 @@ import (
"github.com/ipfs/kubo/config"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
"github.com/ipfs/kubo/core/node"
mh "github.com/multiformats/go-multihash"
dag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipns"
"github.com/ipfs/boxo/provider"
cid "github.com/ipfs/go-cid"
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
@ -89,7 +91,7 @@ var findProvidersRoutingCmd = &cmds.Command{
defer cancel()
pchan := n.Routing.FindProvidersAsync(ctx, c, numProviders)
for p := range pchan {
np := p
np := cmdutils.CloneAddrInfo(p)
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
Type: routing.Provider,
Responses: []*peer.AddrInfo{&np},
@ -295,9 +297,9 @@ Trigger reprovider to announce our data to network.
if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'")
}
provideSys, ok := nd.Provider.(*node.LegacyProvider)
provideSys, ok := nd.Provider.(provider.Reprovider)
if !ok {
return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
return errors.New("manual reprovide only available with legacy provider (Provide.DHT.SweepEnabled=false)")
}
err = provideSys.Reprovide(req.Context)

View File

@ -29,7 +29,7 @@ type key struct {
func newKey(name string, pid peer.ID) (*key, error) {
p, err := path.NewPath("/ipns/" + ipns.NameFromPeer(pid).String())
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot create new key: %w", err)
}
return &key{
name: name,
@ -121,34 +121,37 @@ func (api *KeyAPI) List(ctx context.Context) ([]coreiface.Key, error) {
keys, err := api.repo.Keystore().List()
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot list keys in keystore: %w", err)
}
sort.Strings(keys)
out := make([]coreiface.Key, len(keys)+1)
out := make([]coreiface.Key, 1, len(keys)+1)
out[0], err = newKey("self", api.identity)
if err != nil {
return nil, err
}
for n, k := range keys {
for _, k := range keys {
privKey, err := api.repo.Keystore().Get(k)
if err != nil {
return nil, err
log.Errorf("cannot get key from keystore: %s", err)
continue
}
pubKey := privKey.GetPublic()
pid, err := peer.IDFromPublicKey(pubKey)
if err != nil {
return nil, err
log.Errorf("cannot decode public key: %s", err)
continue
}
out[n+1], err = newKey(k, pid)
k, err := newKey(k, pid)
if err != nil {
return nil, err
}
out = append(out, k)
}
return out, nil
}

View File

@ -177,12 +177,18 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
if settings.MaxHAMTFanoutSet {
fileAdder.MaxHAMTFanout = settings.MaxHAMTFanout
}
if settings.SizeEstimationModeSet {
fileAdder.SizeEstimationMode = settings.SizeEstimationMode
}
fileAdder.NoCopy = settings.NoCopy
fileAdder.CidBuilder = prefix
fileAdder.PreserveMode = settings.PreserveMode
fileAdder.PreserveMtime = settings.PreserveMtime
fileAdder.FileMode = settings.Mode
fileAdder.FileMtime = settings.Mtime
if settings.IncludeEmptyDirsSet {
fileAdder.IncludeEmptyDirs = settings.IncludeEmptyDirs
}
switch settings.Layout {
case options.BalancedLayout:

View File

@ -78,9 +78,23 @@ func ListenAndServe(n *core.IpfsNode, listeningMultiAddr string, options ...Serv
return Serve(n, manet.NetListener(list), options...)
}
// Serve accepts incoming HTTP connections on the listener and pass them
// Serve accepts incoming HTTP connections on the listener and passes them
// to ServeOption handlers.
func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error {
return ServeWithReady(node, lis, nil, options...)
}
// ServeWithReady is like Serve but signals on the ready channel when the
// server is about to accept connections. The channel is closed right before
// server.Serve() is called.
//
// This is useful for callers that need to perform actions (like writing
// address files) only after the server is guaranteed to be accepting
// connections, avoiding race conditions where clients see the file before
// the server is ready.
//
// Passing nil for ready is equivalent to calling Serve().
func ServeWithReady(node *core.IpfsNode, lis net.Listener, ready chan<- struct{}, options ...ServeOption) error {
// make sure we close this no matter what.
defer lis.Close()
@ -107,6 +121,9 @@ func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error
var serverError error
serverClosed := make(chan struct{})
go func() {
if ready != nil {
close(ready)
}
serverError = server.Serve(lis)
close(serverClosed)
}()
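A sketch of the caller pattern the ServeWithReady comment describes: wait on the ready channel before advertising the endpoint. The helper name, repo-path handling, and option list are assumptions, not code from this change.

```go
// Illustrative only: a hypothetical caller that writes an "api" address file
// only after the server is about to accept connections.
package example

import (
	"net"
	"os"
	"path/filepath"

	"github.com/ipfs/kubo/core"
	"github.com/ipfs/kubo/core/corehttp"
)

func serveAndAnnounce(node *core.IpfsNode, lis net.Listener, repoPath string, opts ...corehttp.ServeOption) error {
	ready := make(chan struct{})
	errc := make(chan error, 1)
	go func() {
		errc <- corehttp.ServeWithReady(node, lis, ready, opts...)
	}()

	select {
	case <-ready:
		// Server is about to call Serve(); safe to advertise the endpoint.
		apiFile := filepath.Join(repoPath, "api")
		if err := os.WriteFile(apiFile, []byte(lis.Addr().String()), 0o600); err != nil {
			return err
		}
	case err := <-errc:
		return err // server exited before becoming ready
	}
	return <-errc // block until the server stops
}
```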

View File

@ -112,6 +112,7 @@ func Libp2pGatewayOption() ServeOption {
Menu: nil,
// Apply timeout and concurrency limits from user config
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxRequestDuration: cfg.Gateway.MaxRequestDuration.WithDefault(config.DefaultMaxRequestDuration),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))),
DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true
@ -273,6 +274,7 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
NoDNSLink: cfg.Gateway.NoDNSLink,
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxRequestDuration: cfg.Gateway.MaxRequestDuration.WithDefault(config.DefaultMaxRequestDuration),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))),
DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL),

View File

@ -12,11 +12,12 @@ import (
)
// WebUI version confirmed to work with this Kubo version
const WebUIPath = "/ipfs/bafybeidsjptidvb6wf6benznq2pxgnt5iyksgtecpmjoimlmswhtx2u5ua" // v4.10.0
const WebUIPath = "/ipfs/bafybeidfgbcqy435sdbhhejifdxq4o64tlsezajc272zpyxcsmz47uyc64" // v4.11.0
// WebUIPaths is a list of all past webUI paths.
var WebUIPaths = []string{
WebUIPath,
"/ipfs/bafybeidsjptidvb6wf6benznq2pxgnt5iyksgtecpmjoimlmswhtx2u5ua", // v4.10.0
"/ipfs/bafybeicg7e6o2eszkfdzxg5233gmuip2a7kfzoloh7voyvt2r6ivdet54u", // v4.9.1
"/ipfs/bafybeifplj2s3yegn7ko7tdnwpoxa4c5uaqnk2ajnw5geqm34slcj6b6mu", // v4.8.0
"/ipfs/bafybeibfd5kbebqqruouji6ct5qku3tay273g7mt24mmrfzrsfeewaal5y", // v4.7.0

View File

@ -24,16 +24,18 @@ type UnixfsAddSettings struct {
CidVersion int
MhType uint64
Inline bool
InlineLimit int
RawLeaves bool
RawLeavesSet bool
MaxFileLinks int
MaxFileLinksSet bool
MaxDirectoryLinks int
MaxDirectoryLinksSet bool
MaxHAMTFanout int
MaxHAMTFanoutSet bool
Inline bool
InlineLimit int
RawLeaves bool
RawLeavesSet bool
MaxFileLinks int
MaxFileLinksSet bool
MaxDirectoryLinks int
MaxDirectoryLinksSet bool
MaxHAMTFanout int
MaxHAMTFanoutSet bool
SizeEstimationMode *io.SizeEstimationMode
SizeEstimationModeSet bool
Chunker string
Layout Layout
@ -48,10 +50,12 @@ type UnixfsAddSettings struct {
Silent bool
Progress bool
PreserveMode bool
PreserveMtime bool
Mode os.FileMode
Mtime time.Time
PreserveMode bool
PreserveMtime bool
Mode os.FileMode
Mtime time.Time
IncludeEmptyDirs bool
IncludeEmptyDirsSet bool
}
type UnixfsLsSettings struct {
@ -93,10 +97,12 @@ func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix,
Silent: false,
Progress: false,
PreserveMode: false,
PreserveMtime: false,
Mode: 0,
Mtime: time.Time{},
PreserveMode: false,
PreserveMtime: false,
Mode: 0,
Mtime: time.Time{},
IncludeEmptyDirs: true, // default: include empty directories
IncludeEmptyDirsSet: false,
}
for _, opt := range opts {
@ -235,6 +241,15 @@ func (unixfsOpts) MaxHAMTFanout(n int) UnixfsAddOption {
}
}
// SizeEstimationMode specifies how directory size is estimated for HAMT sharding decisions.
func (unixfsOpts) SizeEstimationMode(mode io.SizeEstimationMode) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.SizeEstimationMode = &mode
settings.SizeEstimationModeSet = true
return nil
}
}
// Inline tells the adder to inline small blocks into CIDs
func (unixfsOpts) Inline(enable bool) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
@ -396,3 +411,12 @@ func (unixfsOpts) Mtime(seconds int64, nsecs uint32) UnixfsAddOption {
return nil
}
}
// IncludeEmptyDirs tells the adder to include empty directories in the DAG
func (unixfsOpts) IncludeEmptyDirs(include bool) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.IncludeEmptyDirs = include
settings.IncludeEmptyDirsSet = true
return nil
}
}
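Assuming the usual CoreAPI plumbing, the new option is passed like any other UnixfsAddOption. A minimal sketch follows; the api handle and directory node are assumed to already exist, and only option names defined above are used.

```go
// Illustrative only: passing IncludeEmptyDirs through the CoreAPI.
package example

import (
	"context"
	"fmt"

	"github.com/ipfs/boxo/files"
	coreiface "github.com/ipfs/kubo/core/coreiface"
	"github.com/ipfs/kubo/core/coreiface/options"
)

// addWithoutEmptyDirs adds a directory while skipping empty subdirectories.
func addWithoutEmptyDirs(ctx context.Context, api coreiface.CoreAPI, dir files.Node) error {
	p, err := api.Unixfs().Add(ctx, dir,
		options.Unixfs.IncludeEmptyDirs(false), // skip empty directories in the DAG
	)
	if err != nil {
		return err
	}
	fmt.Println("added:", p)
	return nil
}
```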

View File

@ -26,6 +26,7 @@ import (
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/ipfs/kubo/tracing"
@ -52,49 +53,52 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCLocker, ds ipld.DAG
bufferedDS := ipld.NewBufferedDAG(ctx, ds)
return &Adder{
ctx: ctx,
pinning: p,
gcLocker: bs,
dagService: ds,
bufferedDS: bufferedDS,
Progress: false,
Pin: true,
Trickle: false,
MaxLinks: ihelper.DefaultLinksPerBlock,
MaxHAMTFanout: uio.DefaultShardWidth,
Chunker: "",
ctx: ctx,
pinning: p,
gcLocker: bs,
dagService: ds,
bufferedDS: bufferedDS,
Progress: false,
Pin: true,
Trickle: false,
MaxLinks: ihelper.DefaultLinksPerBlock,
MaxHAMTFanout: uio.DefaultShardWidth,
Chunker: "",
IncludeEmptyDirs: config.DefaultUnixFSIncludeEmptyDirs,
}, nil
}
// Adder holds the switches passed to the `add` command.
type Adder struct {
ctx context.Context
pinning pin.Pinner
gcLocker bstore.GCLocker
dagService ipld.DAGService
bufferedDS *ipld.BufferedDAG
Out chan<- interface{}
Progress bool
Pin bool
PinName string
Trickle bool
RawLeaves bool
MaxLinks int
MaxDirectoryLinks int
MaxHAMTFanout int
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
unlocker bstore.Unlocker
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
ctx context.Context
pinning pin.Pinner
gcLocker bstore.GCLocker
dagService ipld.DAGService
bufferedDS *ipld.BufferedDAG
Out chan<- interface{}
Progress bool
Pin bool
PinName string
Trickle bool
RawLeaves bool
MaxLinks int
MaxDirectoryLinks int
MaxHAMTFanout int
SizeEstimationMode *uio.SizeEstimationMode
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
unlocker bstore.Unlocker
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
PreserveMode bool
PreserveMtime bool
FileMode os.FileMode
FileMtime time.Time
PreserveMode bool
PreserveMtime bool
FileMode os.FileMode
FileMtime time.Time
IncludeEmptyDirs bool
}
func (adder *Adder) mfsRoot() (*mfs.Root, error) {
@ -104,9 +108,10 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) {
// Note, this adds it to DAGService already.
mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, nil, mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
SizeEstimationMode: adder.SizeEstimationMode,
})
if err != nil {
return nil, err
@ -270,11 +275,12 @@ func (adder *Adder) addNode(node ipld.Node, path string) error {
dir := gopath.Dir(path)
if dir != "." {
opts := mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
SizeEstimationMode: adder.SizeEstimationMode,
}
if err := mfs.Mkdir(mr, dir, opts); err != nil {
return err
@ -480,15 +486,34 @@ func (adder *Adder) addFile(path string, file files.File) error {
func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory, toplevel bool) error {
log.Infof("adding directory: %s", path)
// Peek at first entry to check if directory is empty.
// We advance the iterator once here and continue from this position
// in the processing loop below. This avoids allocating a slice to
// collect all entries just to check for emptiness.
it := dir.Entries()
hasEntry := it.Next()
if !hasEntry {
if err := it.Err(); err != nil {
return err
}
// Directory is empty. Skip it unless IncludeEmptyDirs is set or
// this is the toplevel directory (we always include the root).
if !adder.IncludeEmptyDirs && !toplevel {
log.Debugf("skipping empty directory: %s", path)
return nil
}
}
// if we need to store mode or modification time then create a new root which includes that data
if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) {
mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil, nil,
mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
ModTime: adder.FileMtime,
Mode: adder.FileMode,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
ModTime: adder.FileMtime,
Mode: adder.FileMode,
SizeEstimationMode: adder.SizeEstimationMode,
})
if err != nil {
return err
@ -502,26 +527,28 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory
return err
}
err = mfs.Mkdir(mr, path, mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mode: adder.FileMode,
ModTime: adder.FileMtime,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mode: adder.FileMode,
ModTime: adder.FileMtime,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
SizeEstimationMode: adder.SizeEstimationMode,
})
if err != nil {
return err
}
}
it := dir.Entries()
for it.Next() {
// Process directory entries. The iterator was already advanced once above
// to peek for emptiness, so we start from that position.
for hasEntry {
fpath := gopath.Join(path, it.Name())
err := adder.addFileNode(ctx, fpath, it.Node(), false)
if err != nil {
if err := adder.addFileNode(ctx, fpath, it.Node(), false); err != nil {
return err
}
hasEntry = it.Next()
}
return it.Err()

View File

@ -30,6 +30,7 @@ import (
const testPeerID = "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe"
func TestAddMultipleGCLive(t *testing.T) {
ctx := t.Context()
r := &repo.Mock{
C: config.Config{
Identity: config.Identity{
@ -38,13 +39,13 @@ func TestAddMultipleGCLive(t *testing.T) {
},
D: syncds.MutexWrap(datastore.NewMapDatastore()),
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
node, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
if err != nil {
t.Fatal(err)
}
out := make(chan interface{}, 10)
adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)
adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG)
if err != nil {
t.Fatal(err)
}
@ -67,7 +68,7 @@ func TestAddMultipleGCLive(t *testing.T) {
go func() {
defer close(out)
_, _ = adder.AddAllAndPin(context.Background(), slf)
_, _ = adder.AddAllAndPin(ctx, slf)
// Ignore errors for clarity - the real bug would be gc'ing files while adding them, not this resultant error
}()
@ -80,9 +81,12 @@ func TestAddMultipleGCLive(t *testing.T) {
gc1started := make(chan struct{})
go func() {
defer close(gc1started)
gc1out = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
gc1out = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// Give GC goroutine time to reach GCLock (will block there waiting for adder)
time.Sleep(time.Millisecond * 100)
// GC shouldn't get the lock until after the file is completely added
select {
case <-gc1started:
@ -119,9 +123,12 @@ func TestAddMultipleGCLive(t *testing.T) {
gc2started := make(chan struct{})
go func() {
defer close(gc2started)
gc2out = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
gc2out = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// Give GC goroutine time to reach GCLock
time.Sleep(time.Millisecond * 100)
select {
case <-gc2started:
t.Fatal("gc shouldn't have started yet")
@ -155,6 +162,7 @@ func TestAddMultipleGCLive(t *testing.T) {
}
func TestAddGCLive(t *testing.T) {
ctx := t.Context()
r := &repo.Mock{
C: config.Config{
Identity: config.Identity{
@ -163,13 +171,13 @@ func TestAddGCLive(t *testing.T) {
},
D: syncds.MutexWrap(datastore.NewMapDatastore()),
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
node, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
if err != nil {
t.Fatal(err)
}
out := make(chan interface{})
adder, err := NewAdder(context.Background(), node.Pinning, node.Blockstore, node.DAG)
adder, err := NewAdder(ctx, node.Pinning, node.Blockstore, node.DAG)
if err != nil {
t.Fatal(err)
}
@ -193,7 +201,7 @@ func TestAddGCLive(t *testing.T) {
go func() {
defer close(addDone)
defer close(out)
_, err := adder.AddAllAndPin(context.Background(), slf)
_, err := adder.AddAllAndPin(ctx, slf)
if err != nil {
t.Error(err)
}
@ -211,7 +219,7 @@ func TestAddGCLive(t *testing.T) {
gcstarted := make(chan struct{})
go func() {
defer close(gcstarted)
gcout = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
gcout = gc.GC(ctx, node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// gc shouldn't start until we let the add finish its current file.
@ -255,9 +263,6 @@ func TestAddGCLive(t *testing.T) {
last = c
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
set := cid.NewSet()
err = dag.Walk(ctx, dag.GetLinksWithDAG(node.DAG), last, set.Visit)
if err != nil {

View File

@ -30,6 +30,9 @@ import (
"github.com/ipfs/kubo/repo"
)
// FilesRootDatastoreKey is the datastore key for the MFS files root CID.
var FilesRootDatastoreKey = datastore.NewKey("/local/filesroot")
// BlockService creates new blockservice which provides an interface to fetch content-addressable blocks
func BlockService(cfg *config.Config) func(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService {
return func(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService {
@ -181,7 +184,6 @@ func Dag(bs blockservice.BlockService) format.DAGService {
// Files loads persisted MFS root
func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService, bs blockstore.Blockstore, prov DHTProvider) (*mfs.Root, error) {
dsk := datastore.NewKey("/local/filesroot")
pf := func(ctx context.Context, c cid.Cid) error {
rootDS := repo.Datastore()
if err := rootDS.Sync(ctx, blockstore.BlockPrefix); err != nil {
@ -191,15 +193,15 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo
return err
}
if err := rootDS.Put(ctx, dsk, c.Bytes()); err != nil {
if err := rootDS.Put(ctx, FilesRootDatastoreKey, c.Bytes()); err != nil {
return err
}
return rootDS.Sync(ctx, dsk)
return rootDS.Sync(ctx, FilesRootDatastoreKey)
}
var nd *merkledag.ProtoNode
ctx := helpers.LifecycleCtx(mctx, lc)
val, err := repo.Datastore().Get(ctx, dsk)
val, err := repo.Datastore().Get(ctx, FilesRootDatastoreKey)
switch {
case errors.Is(err, datastore.ErrNotFound):
@ -241,9 +243,27 @@ func Files(strategy string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo
prov = nil
}
root, err := mfs.NewRoot(ctx, dag, nd, pf, prov)
// Get configured settings from Import config
cfg, err := repo.Config()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get config: %w", err)
}
chunkerGen := cfg.Import.UnixFSSplitterFunc()
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
maxHAMTFanout := int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
hamtShardingSize := int(cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
root, err := mfs.NewRoot(ctx, dag, nd, pf, prov,
mfs.WithChunker(chunkerGen),
mfs.WithMaxLinks(maxDirLinks),
mfs.WithMaxHAMTFanout(maxHAMTFanout),
mfs.WithHAMTShardingSize(hamtShardingSize),
mfs.WithSizeEstimationMode(sizeEstimationMode),
)
if err != nil {
return nil, fmt.Errorf("failed to initialize MFS root from %s stored at %s: %w. "+
"If corrupted, use 'ipfs files chroot' to reset (see --help)", nd.Cid(), FilesRootDatastoreKey, err)
}
lc.Append(fx.Hook{

View File

@ -10,6 +10,10 @@ import (
madns "github.com/multiformats/go-multiaddr-dns"
)
// Compile-time interface check: *madns.Resolver (returned by gateway.NewDNSResolver
// and madns.NewResolver) must implement madns.BasicResolver for p2pForgeResolver fallback.
var _ madns.BasicResolver = (*madns.Resolver)(nil)
func DNSResolver(cfg *config.Config) (*madns.Resolver, error) {
var dohOpts []doh.Option
if !cfg.DNS.MaxCacheTTL.IsDefault() {
@ -19,5 +23,34 @@ func DNSResolver(cfg *config.Config) (*madns.Resolver, error) {
// Replace "auto" DNS resolver placeholders with autoconf values
resolvers := cfg.DNSResolversWithAutoConf()
return gateway.NewDNSResolver(resolvers, dohOpts...)
// Get base resolver from boxo (handles custom DoH resolvers per eTLD)
baseResolver, err := gateway.NewDNSResolver(resolvers, dohOpts...)
if err != nil {
return nil, err
}
// Check if we should skip network DNS lookups for p2p-forge domains
skipAutoTLSDNS := cfg.AutoTLS.SkipDNSLookup.WithDefault(config.DefaultAutoTLSSkipDNSLookup)
if !skipAutoTLSDNS {
// Local resolution disabled, use network DNS for everything
return baseResolver, nil
}
// Build list of p2p-forge domains to resolve locally without network I/O.
// AutoTLS hostnames encode IP addresses directly (e.g., 1-2-3-4.peerID.libp2p.direct),
// so DNS lookups are wasteful. We resolve these in-memory when possible.
forgeDomains := []string{config.DefaultDomainSuffix}
customDomain := cfg.AutoTLS.DomainSuffix.WithDefault(config.DefaultDomainSuffix)
if customDomain != config.DefaultDomainSuffix {
forgeDomains = append(forgeDomains, customDomain)
}
forgeResolver := NewP2PForgeResolver(forgeDomains, baseResolver)
// Register p2p-forge resolver for each domain, fallback to baseResolver for others
opts := []madns.Option{madns.WithDefaultResolver(baseResolver)}
for _, domain := range forgeDomains {
opts = append(opts, madns.WithDomainResolver(domain+".", forgeResolver))
}
return madns.NewResolver(opts...)
}
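The NewP2PForgeResolver implementation is not shown in this hunk. As a rough sketch of the idea described in the comment above, decoding the IPv4 address embedded in an AutoTLS label might look like this; the helper name and IPv4-only handling are assumptions.

```go
// Illustrative only: an AutoTLS label like "1-2-3-4" in
// "1-2-3-4.<peerID>.libp2p.direct" already encodes the IPv4 address,
// so it can be recovered without any network DNS query.
package example

import (
	"fmt"
	"net"
	"strings"
)

func ipFromForgeHost(host string) (net.IP, error) {
	label, _, ok := strings.Cut(host, ".")
	if !ok {
		return nil, fmt.Errorf("unexpected hostname: %s", host)
	}
	ip := net.ParseIP(strings.ReplaceAll(label, "-", "."))
	if ip == nil {
		return nil, fmt.Errorf("no IPv4 address embedded in label %q", label)
	}
	return ip, nil
}
```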

View File

@ -438,12 +438,13 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
return fx.Error(err)
}
// Auto-sharding settings
shardSingThresholdInt := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
// Directory sharding settings from Import config.
// These globals affect both `ipfs add` and MFS (`ipfs files` API).
shardSizeThreshold := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardMaxFanout := cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout)
// TODO: avoid overriding this globally, see if we can extend Directory interface like Get/SetMaxLinks from https://github.com/ipfs/boxo/pull/906
uio.HAMTShardingSize = int(shardSingThresholdInt)
uio.HAMTShardingSize = int(shardSizeThreshold)
uio.DefaultShardWidth = int(shardMaxFanout)
uio.HAMTSizeEstimation = cfg.Import.HAMTSizeEstimationMode()
providerStrategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)

View File

@ -1,26 +1,85 @@
package libp2p
import (
"context"
"errors"
"log/slog"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/discovery"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"go.uber.org/fx"
"github.com/ipfs/kubo/core/node/helpers"
"github.com/ipfs/kubo/repo"
)
type pubsubParams struct {
fx.In
Repo repo.Repo
Host host.Host
Discovery discovery.Discovery
}
func FloodSub(pubsubOptions ...pubsub.Option) interface{} {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, disc discovery.Discovery) (service *pubsub.PubSub, err error) {
return pubsub.NewFloodSub(helpers.LifecycleCtx(mctx, lc), host, append(pubsubOptions, pubsub.WithDiscovery(disc))...)
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, params pubsubParams) (service *pubsub.PubSub, err error) {
return pubsub.NewFloodSub(
helpers.LifecycleCtx(mctx, lc),
params.Host,
append(pubsubOptions,
pubsub.WithDiscovery(params.Discovery),
pubsub.WithDefaultValidator(newSeqnoValidator(params.Repo.Datastore())))...,
)
}
}
func GossipSub(pubsubOptions ...pubsub.Option) interface{} {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, disc discovery.Discovery) (service *pubsub.PubSub, err error) {
return pubsub.NewGossipSub(helpers.LifecycleCtx(mctx, lc), host, append(
pubsubOptions,
pubsub.WithDiscovery(disc),
pubsub.WithFloodPublish(true))...,
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, params pubsubParams) (service *pubsub.PubSub, err error) {
return pubsub.NewGossipSub(
helpers.LifecycleCtx(mctx, lc),
params.Host,
append(pubsubOptions,
pubsub.WithDiscovery(params.Discovery),
pubsub.WithFloodPublish(true), // flood own publications to all peers for reliable IPNS delivery
pubsub.WithDefaultValidator(newSeqnoValidator(params.Repo.Datastore())))...,
)
}
}
func newSeqnoValidator(ds datastore.Datastore) pubsub.ValidatorEx {
return pubsub.NewBasicSeqnoValidator(&seqnoStore{ds: ds}, slog.New(logging.SlogHandler()).With("logger", "pubsub"))
}
// SeqnoStorePrefix is the datastore prefix for pubsub seqno validator state.
const SeqnoStorePrefix = "/pubsub/seqno/"
// seqnoStore implements pubsub.PeerMetadataStore using the repo datastore.
// It stores the maximum seen sequence number per peer to prevent message
// cycles when network diameter exceeds the timecache span.
type seqnoStore struct {
ds datastore.Datastore
}
var _ pubsub.PeerMetadataStore = (*seqnoStore)(nil)
// Get returns the stored seqno for a peer, or (nil, nil) if the peer is unknown.
// Returning (nil, nil) for unknown peers allows BasicSeqnoValidator to accept
// the first message from any peer.
func (s *seqnoStore) Get(ctx context.Context, p peer.ID) ([]byte, error) {
key := datastore.NewKey(SeqnoStorePrefix + p.String())
val, err := s.ds.Get(ctx, key)
if errors.Is(err, datastore.ErrNotFound) {
return nil, nil
}
return val, err
}
// Put stores the seqno for a peer.
func (s *seqnoStore) Put(ctx context.Context, p peer.ID, val []byte) error {
key := datastore.NewKey(SeqnoStorePrefix + p.String())
return s.ds.Put(ctx, key, val)
}

View File

@ -0,0 +1,130 @@
package libp2p
import (
"encoding/binary"
"testing"
"github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-datastore/sync"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
)
// TestSeqnoStore tests the seqnoStore implementation which backs the
// BasicSeqnoValidator. The validator prevents message cycles when network
// diameter exceeds the timecache span by tracking the maximum sequence number
// seen from each peer.
func TestSeqnoStore(t *testing.T) {
ctx := t.Context()
ds := syncds.MutexWrap(datastore.NewMapDatastore())
store := &seqnoStore{ds: ds}
peerA, err := peer.Decode("12D3KooWGC6TvWhfapngX6wvJHMYvKpDMXPb3ZnCZ6dMoaMtimQ5")
require.NoError(t, err)
peerB, err := peer.Decode("12D3KooWJRqDKTRjvXeGdUEgwkHNsoghYMBUagNYgLPdA4mqdTeo")
require.NoError(t, err)
// BasicSeqnoValidator expects Get to return (nil, nil) for unknown peers,
// not an error. This allows the validator to accept the first message from
// any peer without special-casing.
t.Run("unknown peer returns nil without error", func(t *testing.T) {
val, err := store.Get(ctx, peerA)
require.NoError(t, err)
require.Nil(t, val, "unknown peer should return nil, not empty slice")
})
// Verify basic store/retrieve functionality with a sequence number encoded
// as big-endian uint64, matching the format used by BasicSeqnoValidator.
t.Run("stores and retrieves seqno", func(t *testing.T) {
seqno := uint64(12345)
data := make([]byte, 8)
binary.BigEndian.PutUint64(data, seqno)
err := store.Put(ctx, peerA, data)
require.NoError(t, err)
val, err := store.Get(ctx, peerA)
require.NoError(t, err)
require.Equal(t, seqno, binary.BigEndian.Uint64(val))
})
// Each peer must have isolated storage. If peer data leaked between peers,
// the validator would incorrectly reject valid messages or accept replays.
t.Run("isolates seqno per peer", func(t *testing.T) {
seqnoA := uint64(100)
seqnoB := uint64(200)
dataA := make([]byte, 8)
dataB := make([]byte, 8)
binary.BigEndian.PutUint64(dataA, seqnoA)
binary.BigEndian.PutUint64(dataB, seqnoB)
err := store.Put(ctx, peerA, dataA)
require.NoError(t, err)
err = store.Put(ctx, peerB, dataB)
require.NoError(t, err)
valA, err := store.Get(ctx, peerA)
require.NoError(t, err)
require.Equal(t, seqnoA, binary.BigEndian.Uint64(valA))
valB, err := store.Get(ctx, peerB)
require.NoError(t, err)
require.Equal(t, seqnoB, binary.BigEndian.Uint64(valB))
})
// The validator updates the stored seqno when accepting messages with
// higher seqnos. This test verifies that updates work correctly.
t.Run("updates seqno to higher value", func(t *testing.T) {
seqno1 := uint64(1000)
seqno2 := uint64(2000)
data1 := make([]byte, 8)
data2 := make([]byte, 8)
binary.BigEndian.PutUint64(data1, seqno1)
binary.BigEndian.PutUint64(data2, seqno2)
err := store.Put(ctx, peerA, data1)
require.NoError(t, err)
err = store.Put(ctx, peerA, data2)
require.NoError(t, err)
val, err := store.Get(ctx, peerA)
require.NoError(t, err)
require.Equal(t, seqno2, binary.BigEndian.Uint64(val))
})
// Verify the datastore key format. This is important for:
// 1. Debugging: operators can inspect/clear pubsub state
// 2. Migrations: future changes need to know the key format
t.Run("uses expected datastore key format", func(t *testing.T) {
seqno := uint64(42)
data := make([]byte, 8)
binary.BigEndian.PutUint64(data, seqno)
err := store.Put(ctx, peerA, data)
require.NoError(t, err)
// Verify we can read directly from datastore with expected key
expectedKey := datastore.NewKey("/pubsub/seqno/" + peerA.String())
val, err := ds.Get(ctx, expectedKey)
require.NoError(t, err)
require.Equal(t, seqno, binary.BigEndian.Uint64(val))
})
// Verify data persists when creating a new store instance with the same
// underlying datastore. This simulates node restart.
t.Run("persists across store instances", func(t *testing.T) {
seqno := uint64(99999)
data := make([]byte, 8)
binary.BigEndian.PutUint64(data, seqno)
err := store.Put(ctx, peerB, data)
require.NoError(t, err)
// Create new store instance with same datastore
store2 := &seqnoStore{ds: ds}
val, err := store2.Get(ctx, peerB)
require.NoError(t, err)
require.Equal(t, seqno, binary.BigEndian.Uint64(val))
})
}

View File

@ -12,7 +12,6 @@ import (
"github.com/ipfs/kubo/core/node/helpers"
"github.com/ipfs/kubo/repo"
"github.com/filecoin-project/go-clock"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/network"
@ -112,7 +111,6 @@ filled in with autocomputed defaults.`)
return nil, opts, fmt.Errorf("creating libp2p resource manager: %w", err)
}
lrm := &loggingResourceManager{
clock: clock.New(),
logger: &logging.Logger("resourcemanager").SugaredLogger,
delegate: manager,
}

View File

@ -7,7 +7,6 @@ import (
"sync"
"time"
"github.com/filecoin-project/go-clock"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
@ -17,7 +16,6 @@ import (
)
type loggingResourceManager struct {
clock clock.Clock
logger *zap.SugaredLogger
delegate network.ResourceManager
logInterval time.Duration
@ -42,7 +40,7 @@ func (n *loggingResourceManager) start(ctx context.Context) {
if logInterval == 0 {
logInterval = 10 * time.Second
}
ticker := n.clock.Ticker(logInterval)
ticker := time.NewTicker(logInterval)
go func() {
defer ticker.Stop()
for {

View File

@ -2,9 +2,9 @@ package libp2p
import (
"testing"
"testing/synctest"
"time"
"github.com/filecoin-project/go-clock"
"github.com/libp2p/go-libp2p/core/network"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
ma "github.com/multiformats/go-multiaddr"
@ -14,48 +14,49 @@ import (
)
func TestLoggingResourceManager(t *testing.T) {
clock := clock.NewMock()
orig := rcmgr.DefaultLimits.AutoScale()
limits := orig.ToPartialLimitConfig()
limits.System.Conns = 1
limits.System.ConnsInbound = 1
limits.System.ConnsOutbound = 1
limiter := rcmgr.NewFixedLimiter(limits.Build(orig))
rm, err := rcmgr.NewResourceManager(limiter)
if err != nil {
t.Fatal(err)
}
oCore, oLogs := observer.New(zap.WarnLevel)
oLogger := zap.New(oCore)
lrm := &loggingResourceManager{
clock: clock,
logger: oLogger.Sugar(),
delegate: rm,
logInterval: 1 * time.Second,
}
// 2 of these should result in resource limit exceeded errors and subsequent log messages
for i := 0; i < 3; i++ {
_, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234"))
}
// run the logger which will write an entry for those errors
ctx := t.Context()
lrm.start(ctx)
clock.Add(3 * time.Second)
timer := time.NewTimer(1 * time.Second)
for {
select {
case <-timer.C:
t.Fatalf("expected logs never arrived")
default:
if oLogs.Len() == 0 {
continue
}
require.Equal(t, "Protected from exceeding resource limits 2 times. libp2p message: \"system: cannot reserve inbound connection: resource limit exceeded\".", oLogs.All()[0].Message)
return
synctest.Test(t, func(t *testing.T) {
orig := rcmgr.DefaultLimits.AutoScale()
limits := orig.ToPartialLimitConfig()
limits.System.Conns = 1
limits.System.ConnsInbound = 1
limits.System.ConnsOutbound = 1
limiter := rcmgr.NewFixedLimiter(limits.Build(orig))
rm, err := rcmgr.NewResourceManager(limiter)
if err != nil {
t.Fatal(err)
}
}
defer rm.Close()
oCore, oLogs := observer.New(zap.WarnLevel)
oLogger := zap.New(oCore)
lrm := &loggingResourceManager{
logger: oLogger.Sugar(),
delegate: rm,
logInterval: 1 * time.Second,
}
// 2 of these should result in resource limit exceeded errors and subsequent log messages
for i := 0; i < 3; i++ {
_, _ = lrm.OpenConnection(network.DirInbound, false, ma.StringCast("/ip4/127.0.0.1/tcp/1234"))
}
// run the logger which will write an entry for those errors
ctx := t.Context()
lrm.start(ctx)
time.Sleep(3 * time.Second)
timer := time.NewTimer(1 * time.Second)
for {
select {
case <-timer.C:
t.Fatalf("expected logs never arrived")
default:
if oLogs.Len() == 0 {
continue
}
require.Equal(t, "Protected from exceeding resource limits 2 times. libp2p message: \"system: cannot reserve inbound connection: resource limit exceeded\".", oLogs.All()[0].Message)
return
}
}
})
}

View File

@ -0,0 +1,120 @@
package node
import (
"context"
"net"
"net/netip"
"strings"
"github.com/libp2p/go-libp2p/core/peer"
madns "github.com/multiformats/go-multiaddr-dns"
)
// p2pForgeResolver implements madns.BasicResolver for deterministic resolution
// of p2p-forge domains (e.g., *.libp2p.direct) without network I/O for A/AAAA queries.
//
// p2p-forge encodes IP addresses in DNS hostnames:
// - IPv4: 1-2-3-4.peerID.libp2p.direct -> 1.2.3.4
// - IPv6: 2001-db8--1.peerID.libp2p.direct -> 2001:db8::1
//
// When local parsing fails (invalid format, invalid peerID, etc.), the resolver
// falls back to network DNS. This ensures future <peerID>.libp2p.direct records
// can still resolve if the authoritative DNS adds support for them.
//
// TXT queries always delegate to the fallback resolver. This is important for
// p2p-forge/client ACME DNS-01 challenges to work correctly, as Let's Encrypt
// needs to verify TXT records at _acme-challenge.peerID.libp2p.direct.
//
// See: https://github.com/ipshipyard/p2p-forge
type p2pForgeResolver struct {
suffixes []string
fallback madns.BasicResolver
}
// Compile-time check that p2pForgeResolver implements madns.BasicResolver.
var _ madns.BasicResolver = (*p2pForgeResolver)(nil)
// NewP2PForgeResolver creates a resolver for the given p2p-forge domain suffixes.
// Each suffix should be a bare domain like "libp2p.direct" (without leading dot).
// When local IP parsing fails, queries fall back to the provided resolver.
// TXT queries always delegate to the fallback resolver for ACME compatibility.
func NewP2PForgeResolver(suffixes []string, fallback madns.BasicResolver) *p2pForgeResolver {
normalized := make([]string, len(suffixes))
for i, s := range suffixes {
normalized[i] = strings.ToLower(strings.TrimSuffix(s, "."))
}
return &p2pForgeResolver{suffixes: normalized, fallback: fallback}
}
// LookupIPAddr parses IP addresses encoded in the hostname.
//
// Format: <encoded-ip>.<peerID>.<suffix>
// - IPv4: 192-168-1-1.peerID.libp2p.direct -> [192.168.1.1]
// - IPv6: 2001-db8--1.peerID.libp2p.direct -> [2001:db8::1]
//
// If the hostname doesn't match the expected format (wrong suffix, invalid peerID,
// invalid IP encoding, or peerID-only), the lookup falls back to network DNS.
// This allows future DNS records like <peerID>.libp2p.direct to resolve normally.
func (r *p2pForgeResolver) LookupIPAddr(ctx context.Context, hostname string) ([]net.IPAddr, error) {
// DNS is case-insensitive, normalize to lowercase
hostname = strings.ToLower(strings.TrimSuffix(hostname, "."))
// find matching suffix and extract subdomain
var subdomain string
for _, suffix := range r.suffixes {
if sub, found := strings.CutSuffix(hostname, "."+suffix); found {
subdomain = sub
break
}
}
if subdomain == "" {
// not a p2p-forge domain, fallback to network
return r.fallback.LookupIPAddr(ctx, hostname)
}
// split subdomain into parts: should be [ip-prefix, peerID]
parts := strings.Split(subdomain, ".")
if len(parts) != 2 {
// not the expected <ip>.<peerID> format, fallback to network
return r.fallback.LookupIPAddr(ctx, hostname)
}
encodedIP := parts[0]
peerIDStr := parts[1]
// validate peerID (same validation as libp2p.direct DNS server)
if _, err := peer.Decode(peerIDStr); err != nil {
// invalid peerID, fallback to network
return r.fallback.LookupIPAddr(ctx, hostname)
}
// RFC 1123: hostname labels cannot start or end with hyphen
if len(encodedIP) == 0 || encodedIP[0] == '-' || encodedIP[len(encodedIP)-1] == '-' {
// invalid hostname label, fallback to network
return r.fallback.LookupIPAddr(ctx, hostname)
}
// try parsing as IPv4 first: segments joined by "-" become "."
segments := strings.Split(encodedIP, "-")
if len(segments) == 4 {
ipv4Str := strings.Join(segments, ".")
if ip, err := netip.ParseAddr(ipv4Str); err == nil && ip.Is4() {
return []net.IPAddr{{IP: ip.AsSlice()}}, nil
}
}
// try parsing as IPv6: segments joined by "-" become ":"
ipv6Str := strings.Join(segments, ":")
if ip, err := netip.ParseAddr(ipv6Str); err == nil && ip.Is6() {
return []net.IPAddr{{IP: ip.AsSlice()}}, nil
}
// IP parsing failed, fallback to network
return r.fallback.LookupIPAddr(ctx, hostname)
}
// LookupTXT delegates to the fallback resolver to support ACME DNS-01 challenges
// and any other TXT record lookups on p2p-forge domains.
func (r *p2pForgeResolver) LookupTXT(ctx context.Context, hostname string) ([]string, error) {
return r.fallback.LookupTXT(ctx, hostname)
}

View File

@ -0,0 +1,172 @@
package node
import (
"context"
"errors"
"net"
"testing"
"github.com/ipfs/kubo/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test constants matching p2p-forge production format
const (
// testPeerID is a valid peerID in CIDv1 base36 format as used by p2p-forge.
// Base36 is lowercase-only, making it safe for case-insensitive DNS.
// Corresponds to 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN in base58btc.
testPeerID = "k51qzi5uqu5dhnwe629wdlncpql6frppdpwnz4wtlcw816aysd5wwlk63g4wmh"
// domainSuffix is the default p2p-forge domain used in tests.
domainSuffix = config.DefaultDomainSuffix
)
// mockResolver implements madns.BasicResolver for testing
type mockResolver struct {
txtRecords map[string][]string
ipRecords map[string][]net.IPAddr
ipErr error
}
func (m *mockResolver) LookupIPAddr(_ context.Context, hostname string) ([]net.IPAddr, error) {
if m.ipErr != nil {
return nil, m.ipErr
}
if m.ipRecords != nil {
return m.ipRecords[hostname], nil
}
return nil, nil
}
func (m *mockResolver) LookupTXT(_ context.Context, name string) ([]string, error) {
if m.txtRecords != nil {
return m.txtRecords[name], nil
}
return nil, nil
}
// newTestResolver creates a p2pForgeResolver with default suffix.
func newTestResolver(t *testing.T) *p2pForgeResolver {
t.Helper()
return NewP2PForgeResolver([]string{domainSuffix}, &mockResolver{})
}
// assertLookupIP verifies that hostname resolves to wantIP.
func assertLookupIP(t *testing.T, r *p2pForgeResolver, hostname, wantIP string) {
t.Helper()
addrs, err := r.LookupIPAddr(t.Context(), hostname)
require.NoError(t, err)
require.Len(t, addrs, 1)
assert.Equal(t, wantIP, addrs[0].IP.String())
}
func TestP2PForgeResolver_LookupIPAddr(t *testing.T) {
r := newTestResolver(t)
tests := []struct {
name string
hostname string
wantIP string
}{
// IPv4
{"ipv4/basic", "192-168-1-1." + testPeerID + "." + domainSuffix, "192.168.1.1"},
{"ipv4/zeros", "0-0-0-0." + testPeerID + "." + domainSuffix, "0.0.0.0"},
{"ipv4/max", "255-255-255-255." + testPeerID + "." + domainSuffix, "255.255.255.255"},
{"ipv4/trailing dot", "10-0-0-1." + testPeerID + "." + domainSuffix + ".", "10.0.0.1"},
{"ipv4/uppercase suffix", "192-168-1-1." + testPeerID + ".LIBP2P.DIRECT", "192.168.1.1"},
// IPv6
{"ipv6/full", "2001-db8-0-0-0-0-0-1." + testPeerID + "." + domainSuffix, "2001:db8::1"},
{"ipv6/compressed", "2001-db8--1." + testPeerID + "." + domainSuffix, "2001:db8::1"},
{"ipv6/loopback", "0--1." + testPeerID + "." + domainSuffix, "::1"},
{"ipv6/all zeros", "0--0." + testPeerID + "." + domainSuffix, "::"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assertLookupIP(t, r, tt.hostname, tt.wantIP)
})
}
}
func TestP2PForgeResolver_LookupIPAddr_MultipleSuffixes(t *testing.T) {
r := NewP2PForgeResolver([]string{domainSuffix, "custom.example.com"}, &mockResolver{})
tests := []struct {
hostname string
wantIP string
}{
{"192-168-1-1." + testPeerID + "." + domainSuffix, "192.168.1.1"},
{"10-0-0-1." + testPeerID + ".custom.example.com", "10.0.0.1"},
}
for _, tt := range tests {
t.Run(tt.hostname, func(t *testing.T) {
assertLookupIP(t, r, tt.hostname, tt.wantIP)
})
}
}
func TestP2PForgeResolver_LookupIPAddr_FallbackToNetwork(t *testing.T) {
fallbackIP := []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}
tests := []struct {
name string
hostname string
}{
{"peerID only", testPeerID + "." + domainSuffix},
{"invalid peerID", "192-168-1-1.invalid-peer-id." + domainSuffix},
{"invalid IP encoding", "not-an-ip." + testPeerID + "." + domainSuffix},
{"leading hyphen", "-192-168-1-1." + testPeerID + "." + domainSuffix},
{"too many parts", "extra.192-168-1-1." + testPeerID + "." + domainSuffix},
{"wrong suffix", "192-168-1-1." + testPeerID + ".example.com"},
}
// Build fallback records from test cases
ipRecords := make(map[string][]net.IPAddr, len(tests))
for _, tt := range tests {
ipRecords[tt.hostname] = fallbackIP
}
fallback := &mockResolver{ipRecords: ipRecords}
r := NewP2PForgeResolver([]string{domainSuffix}, fallback)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
addrs, err := r.LookupIPAddr(t.Context(), tt.hostname)
require.NoError(t, err)
require.Len(t, addrs, 1, "should fallback to network")
assert.Equal(t, "93.184.216.34", addrs[0].IP.String())
})
}
}
func TestP2PForgeResolver_LookupIPAddr_FallbackError(t *testing.T) {
expectedErr := errors.New("network error")
r := NewP2PForgeResolver([]string{domainSuffix}, &mockResolver{ipErr: expectedErr})
// peerID-only triggers fallback, which returns error
_, err := r.LookupIPAddr(t.Context(), testPeerID+"."+domainSuffix)
require.ErrorIs(t, err, expectedErr)
}
func TestP2PForgeResolver_LookupTXT(t *testing.T) {
t.Run("delegates to fallback for ACME DNS-01", func(t *testing.T) {
acmeHost := "_acme-challenge." + testPeerID + "." + domainSuffix
fallback := &mockResolver{
txtRecords: map[string][]string{acmeHost: {"acme-token-value"}},
}
r := NewP2PForgeResolver([]string{domainSuffix}, fallback)
records, err := r.LookupTXT(t.Context(), acmeHost)
require.NoError(t, err)
assert.Equal(t, []string{"acme-token-value"}, records)
})
t.Run("returns empty when fallback has no records", func(t *testing.T) {
r := NewP2PForgeResolver([]string{domainSuffix}, &mockResolver{})
records, err := r.LookupTXT(t.Context(), "anything."+domainSuffix)
require.NoError(t, err)
assert.Empty(t, records)
})
}

View File

@ -16,6 +16,7 @@ import (
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
"github.com/ipfs/go-datastore/query"
log "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/repo"
irouting "github.com/ipfs/kubo/routing"
@ -53,6 +54,8 @@ const (
keystoreDatastorePath = "keystore"
)
var errAcceleratedDHTNotReady = errors.New("AcceleratedDHTClient: routing table not ready")
// Interval between reprovide queue monitoring checks for slow reprovide alerts.
// Used when Provide.DHT.SweepEnabled=true
const reprovideAlertPollInterval = 15 * time.Minute
@ -322,6 +325,46 @@ type dhtImpl interface {
Host() host.Host
MessageSender() dht_pb.MessageSender
}
type fullrtRouter struct {
*fullrt.FullRT
ready bool
logger *log.ZapEventLogger
}
func newFullRTRouter(fr *fullrt.FullRT, loggerName string) *fullrtRouter {
return &fullrtRouter{
FullRT: fr,
ready: true,
logger: log.Logger(loggerName),
}
}
// GetClosestPeers overrides fullrt.FullRT's GetClosestPeers and returns an
// error if the fullrt's initial network crawl isn't complete yet.
func (fr *fullrtRouter) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) {
if fr.ready {
if !fr.Ready() {
fr.ready = false
fr.logger.Info("AcceleratedDHTClient: waiting for routing table initialization (5-10 min, depends on DHT size and network) to complete before providing")
return nil, errAcceleratedDHTNotReady
}
} else {
if fr.Ready() {
fr.ready = true
fr.logger.Info("AcceleratedDHTClient: routing table ready, providing can begin")
} else {
return nil, errAcceleratedDHTNotReady
}
}
return fr.FullRT.GetClosestPeers(ctx, key)
}
var (
_ dhtImpl = &dht.IpfsDHT{}
_ dhtImpl = &fullrtRouter{}
)
type addrsFilter interface {
FilteredAddrs() []ma.Multiaddr
}
@ -363,6 +406,9 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
// provides happen as fast as possible via a dedicated worker that continuously
// processes the queue regardless of this timing.
bufferedIdleWriteTime = time.Minute
// loggerName is the name of the go-log logger used by the provider.
loggerName = dhtprovider.DefaultLoggerName
)
bufferedProviderOpts := []buffered.Option{
@ -392,6 +438,8 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
ddhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
ddhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
ddhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
ddhtprovider.WithLoggerName(loggerName),
)
if err != nil {
return nil, nil, err
@ -400,7 +448,7 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
}
case *fullrt.FullRT:
if inDht != nil {
impl = inDht
impl = newFullRTRouter(inDht, loggerName)
}
}
if impl == nil {
@ -435,6 +483,8 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
dhtprovider.WithDedicatedPeriodicWorkers(int(cfg.Provide.DHT.DedicatedPeriodicWorkers.WithDefault(config.DefaultProvideDHTDedicatedPeriodicWorkers))),
dhtprovider.WithDedicatedBurstWorkers(int(cfg.Provide.DHT.DedicatedBurstWorkers.WithDefault(config.DefaultProvideDHTDedicatedBurstWorkers))),
dhtprovider.WithMaxProvideConnsPerWorker(int(cfg.Provide.DHT.MaxProvideConnsPerWorker.WithDefault(config.DefaultProvideDHTMaxProvideConnsPerWorker))),
dhtprovider.WithLoggerName(loggerName),
}
prov, err := dhtprovider.New(opts...)
@ -489,10 +539,14 @@ func SweepingProviderOpt(cfg *config.Config) fx.Option {
strategy := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
logger.Infow("provider keystore sync started", "strategy", strategy)
if err := syncKeystore(ctx); err != nil {
logger.Errorw("provider keystore sync failed", "err", err, "strategy", strategy)
} else {
logger.Infow("provider keystore sync completed", "strategy", strategy)
if ctx.Err() == nil {
logger.Errorw("provider keystore sync failed", "err", err, "strategy", strategy)
} else {
logger.Debugw("provider keystore sync interrupted by shutdown", "err", err, "strategy", strategy)
}
return
}
logger.Infow("provider keystore sync completed", "strategy", strategy)
}()
gcCtx, c := context.WithCancel(context.Background())
@ -692,6 +746,48 @@ See docs: https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtmaxw
// ONLINE/OFFLINE
// hasDHTRouting checks if the routing configuration includes a DHT component.
// Returns false for HTTP-only custom routing configurations (e.g., Routing.Type="custom"
// with only HTTP routers). This is used to determine whether SweepingProviderOpt
// can be used, since it requires a DHT client.
func hasDHTRouting(cfg *config.Config) bool {
routingType := cfg.Routing.Type.WithDefault(config.DefaultRoutingType)
switch routingType {
case "auto", "autoclient", "dht", "dhtclient", "dhtserver":
return true
case "custom":
// Check if any router in custom config is DHT-based
for _, router := range cfg.Routing.Routers {
if routerIncludesDHT(router, cfg) {
return true
}
}
return false
default: // "none", "delegated"
return false
}
}
// routerIncludesDHT recursively checks if a router configuration includes DHT.
// Handles parallel and sequential composite routers by checking their children.
func routerIncludesDHT(rp config.RouterParser, cfg *config.Config) bool {
switch rp.Type {
case config.RouterTypeDHT:
return true
case config.RouterTypeParallel, config.RouterTypeSequential:
if children, ok := rp.Parameters.(*config.ComposableRouterParams); ok {
for _, child := range children.Routers {
if childRouter, exists := cfg.Routing.Routers[child.RouterName]; exists {
if routerIncludesDHT(childRouter, cfg) {
return true
}
}
}
}
}
return false
}
// OnlineProviders groups units managing provide routing records online
func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
if !provide {
@ -708,7 +804,15 @@ func OnlineProviders(provide bool, cfg *config.Config) fx.Option {
opts := []fx.Option{
fx.Provide(setReproviderKeyProvider(providerStrategy)),
}
if cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled) {
sweepEnabled := cfg.Provide.DHT.SweepEnabled.WithDefault(config.DefaultProvideDHTSweepEnabled)
dhtAvailable := hasDHTRouting(cfg)
// Use SweepingProvider only when both sweep is enabled AND DHT is available.
// For HTTP-only routing (e.g., Routing.Type="custom" with only HTTP routers),
// fall back to LegacyProvider which works with ProvideManyRouter.
// See https://github.com/ipfs/kubo/issues/11089
if sweepEnabled && dhtAvailable {
opts = append(opts, SweepingProviderOpt(cfg))
} else {
reprovideInterval := cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval)

View File

@ -3,33 +3,14 @@ include mk/header.mk
GOCC ?= go
$(d)/coverage_deps: $$(DEPS_GO) cmd/ipfs/ipfs
rm -rf $(@D)/unitcover && mkdir $(@D)/unitcover
rm -rf $(@D)/sharnesscover && mkdir $(@D)/sharnesscover
ifneq ($(IPFS_SKIP_COVER_BINS),1)
$(d)/coverage_deps: test/bin/gocovmerge
endif
.PHONY: $(d)/coverage_deps
# unit tests coverage
UTESTS_$(d) := $(shell $(GOCC) list -f '{{if (or (len .TestGoFiles) (len .XTestGoFiles))}}{{.ImportPath}}{{end}}' $(go-flags-with-tags) ./... | grep -v go-ipfs/vendor | grep -v go-ipfs/Godeps)
# unit tests coverage is now produced by test_unit target in mk/golang.mk
# (outputs coverage/unit_tests.coverprofile and test/unit/gotest.json)
UCOVER_$(d) := $(addsuffix .coverprofile,$(addprefix $(d)/unitcover/, $(subst /,_,$(UTESTS_$(d)))))
$(UCOVER_$(d)): $(d)/coverage_deps ALWAYS
$(eval TMP_PKG := $(subst _,/,$(basename $(@F))))
$(eval TMP_DEPS := $(shell $(GOCC) list -f '{{range .Deps}}{{.}} {{end}}' $(go-flags-with-tags) $(TMP_PKG) | sed 's/ /\n/g' | grep ipfs/go-ipfs) $(TMP_PKG))
$(eval TMP_DEPS_LIST := $(call join-with,$(comma),$(TMP_DEPS)))
$(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) -v -covermode=atomic -json -coverpkg=$(TMP_DEPS_LIST) -coverprofile=$@ $(TMP_PKG) | tee -a test/unit/gotest.json
$(d)/unit_tests.coverprofile: $(UCOVER_$(d))
gocovmerge $^ > $@
TGTS_$(d) := $(d)/unit_tests.coverprofile
.PHONY: $(d)/unit_tests.coverprofile
TGTS_$(d) :=
# sharness tests coverage
$(d)/ipfs: GOTAGS += testrunmain
@ -46,7 +27,7 @@ endif
export IPFS_COVER_DIR:= $(realpath $(d))/sharnesscover/
$(d)/sharness_tests.coverprofile: export TEST_PLUGIN=0
$(d)/sharness_tests.coverprofile: $(d)/ipfs cmd/ipfs/ipfs-test-cover $(d)/coverage_deps test_sharness
$(d)/sharness_tests.coverprofile: $(d)/ipfs cmd/ipfs/ipfs-test-cover $(d)/coverage_deps test/bin/gocovmerge test_sharness
(cd $(@D)/sharnesscover && find . -type f | gocovmerge -list -) > $@

View File

@ -1,112 +0,0 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `docs/generate-authors.sh`.
Aaron Hill <aa1ronham@gmail.com>
Adam Gashlin <agashlin@gmail.com>
Adrian Ulrich <adrian@blinkenlights.ch>
Alex <alexgbahm@gmail.com>
anarcat <anarcat@users.noreply.github.com>
Andres Buritica <andres@thelinuxkid.com>
Andrew Chin <achin@eminence32.net>
Andy Leap <andyleap@gmail.com>
Artem Andreenko <mio@volmy.com>
Baptiste Jonglez <baptiste--git@jonglez.org>
Brendan Benshoof <brendan@glidr.net>
Brendan Mc <Bren2010@users.noreply.github.com>
Brian Tiger Chow <brian.holderchow@gmail.com>
Caio Alonso <caio@caioalonso.com>
Carlos Cobo <toqueteos@gmail.com>
Cayman Nava <caymannava@gmail.com>
Chas Leichner <chas@chas.io>
Chris Grimmett <xtoast@gmail.com>
Chris P <sahib@online.de>
Chris Sasarak <chris.sasarak@gmail.com>
Christian Couder <chriscool@tuxfamily.org>
Christian Kniep <christian@qnib.org>
Christopher Sasarak <chris.sasarak@gmail.com>
David <github@kattfest.se>
David Braun <David.Braun@Toptal.com>
David Dias <daviddias.p@gmail.com>
David Wagner <wagdav@gmail.com>
dignifiedquire <dignifiedquire@gmail.com>
Dominic Della Valle <DDVpublic@Gmail.com>
Dominic Tarr <dominic.tarr@gmail.com>
drathir <drathir87@gmail.com>
Dylan Powers <dylan.kyle.powers@gmail.com>
Emery Hemingway <emery@vfemail.net>
epitron <chris@ill-logic.com>
Ethan Buchman <ethan@coinculture.info>
Etienne Laurin <etienne@atnnn.com>
Forrest Weston <fweston@eecs.wsu.edu>
Francesco Canessa <makevoid@gmail.com>
gatesvp <gatesvp@gmail.com>
Giuseppe Bertone <bertone.giuseppe@gmail.com>
Harlan T Wood <harlantwood@users.noreply.github.com>
Hector Sanjuan <code@hector.link>
Henry <cryptix@riseup.net>
Ho-Sheng Hsiao <talktohosh@gmail.com>
Jakub Sztandera <kubuxu@protonmail.ch>
Jason Carver <jacarver@linkedin.com>
Jonathan Dahan <jonathan@jonathan.is>
Juan Batiz-Benet <juan@benet.ai>
Karthik Bala <drmelonhead@gmail.com>
Kevin Atkinson <k@kevina.org>
Kevin Wallace <kevin@pentabarf.net>
klauspost <klauspost@gmail.com>
Knut Ahlers <knut@ahlers.me>
Konstantin Koroviev <kkoroviev@gmail.com>
kpcyrd <git@rxv.cc>
Kristoffer Ström <kristoffer@rymdkoloni.se>
Lars Gierth <larsg@systemli.org>
llSourcell <sirajravel@gmail.com>
Marcin Janczyk <marcinjanczyk@gmail.com>
Marcin Rataj <lidel@lidel.org>
Markus Amalthea Magnuson <markus.magnuson@gmail.com>
michael <pfista@gmail.com>
Michael Lovci <michaeltlovci@gmail.com>
Michael Muré <mure.michael@gmail.com>
Michael Pfister <pfista@gmail.com>
Mildred Ki'Lya <mildred-pub.git@mildred.fr>
Muneeb Ali <muneeb@ali.vc>
Nick Hamann <nick@wabbo.org>
palkeo <contact@palkeo.com>
Patrick Connolly <patrick.c.connolly@gmail.com>
Pavol Rusnak <stick@gk2.sk>
Peter Borzov <peter@sowingo.com>
Philip Nelson <me@pnelson.ca>
Quinn Slack <sqs@sourcegraph.com>
ReadmeCritic <frankensteinbot@gmail.com>
rht <rhtbot@gmail.com>
Richard Littauer <richard.littauer@gmail.com>
Robert Carlsen <rwcarlsen@gmail.com>
Roerick Sweeney <sroerick@gmail.com>
Sean Lang <slang800@gmail.com>
SH <github@hertenberger.bayern>
Shanti Bouchez-Mongardé <shanti-pub.git@mildred.fr>
Shaun Bruce <shaun.m.bruce@gmail.com>
Simon Kirkby <tigger@interthingy.com>
Siraj Ravel <jason.ravel@cbsinteractive.com>
Siva Chandran <siva.chandran@realimage.com>
slothbag <slothbag>
sroerick <sroerick@gmail.com>
Stephan Seidt <evilhackerdude@gmail.com>
Stephen Sugden <me@stephensugden.com>
Stephen Whitmore <stephen.whitmore@gmail.com>
Steven Allen <steven@stebalien.com>
Tarnay Kálmán <kalmisoft@gmail.com>
theswitch <theswitch@users.noreply.github.com>
Thomas Gardner <tmg@fastmail.com>
Tim Groeneveld <tim@timg.ws>
Tommi Virtanen <tv@eagain.net>
Tonis Tiigi <tonistiigi@gmail.com>
Tor Arne Vestbø <torarnv@gmail.com>
Travis Person <travis.person@gmail.com>
verokarhu <andreas.metsala@gmail.com>
Vijayee Kulkaa <vijayee.kulkaa@.husmail.com>
Vitor Baptista <vitor@vitorbaptista.com>
vitzli <vitzli@gmail.com>
W. Trevor King <wking@tremily.us>
Whyrusleeping <why@ipfs.io>
wzhd <dev@wzhd.org>
Yuval Langer <yuval.langer@gmail.com>
ᴍᴀᴛᴛ ʙᴇʟʟ <mappum@gmail.com>

View File

@ -1,39 +1,58 @@
# Developer Documentation and Guides
If you are looking for User Documentation & Guides, please visit [docs.ipfs.tech](https://docs.ipfs.tech/) or check [General Documentation](#general-documentation).
If you're looking for User Documentation & Guides, visit [docs.ipfs.tech](https://docs.ipfs.tech/).
If you're experiencing an issue with IPFS, **please follow [our issue guide](github-issue-guide.md) when filing an issue!**
If you're experiencing an issue with IPFS, please [file an issue](https://github.com/ipfs/kubo/issues/new/choose) in this repository.
Otherwise, check out the following guides to using and developing IPFS:
## General Documentation
## Configuration
- [Configuration reference](config.md)
- [Datastore configuration](datastores.md)
- [Experimental features](experimental-features.md)
- [Datastore configuration](datastores.md)
- [Experimental features](experimental-features.md)
- [Environment variables](environment-variables.md)
## Developing `kubo`
## Running Kubo
- First, please read the Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and then the Contributing Guidelines for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md)
- Building on…
- [Windows](windows.md)
- [Performance Debugging Guidelines](debug-guide.md)
- [Release Checklist](releases.md)
- [Gateway configuration](gateway.md)
- [Delegated routing](delegated-routing.md)
- [Content blocking](content-blocking.md) (for public node operators)
- [libp2p resource management](libp2p-resource-management.md)
- [Mounting IPFS with FUSE](fuse.md)
## Metrics & Monitoring
- [Prometheus metrics](metrics.md)
- [Telemetry plugin](telemetry.md)
- [Provider statistics](provide-stats.md)
- [Performance debugging](debug-guide.md)
## Development
- **[Developer Guide](developer-guide.md)** - prerequisites, build, test, and contribute
- Contributing Guidelines [for IPFS projects](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) and for [Go code specifically](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md)
- [Building on Windows](windows.md)
- [Customizing Kubo](customizing.md)
- [Installing plugins](plugins.md)
- [Release checklist](releases.md)
## Guides
- [How to Implement an API Client](implement-api-bindings.md)
- [Connecting with Websockets](transports.md) — if you want `js-ipfs` nodes in web browsers to connect to your `kubo` node, you will need to turn on websocket support in your `kubo` node.
- [Transferring files over IPFS](file-transfer.md)
- [How to implement an API client](implement-api-bindings.md)
- [HTTP/RPC clients](http-rpc-clients.md)
- [Websocket transports](transports.md)
- [Command completion](command-completion.md)
## Advanced User Guides
## Production
- [Transferring a File Over IPFS](file-transfer.md)
- [Installing command completion](command-completion.md)
- [Mounting IPFS with FUSE](fuse.md)
- [Installing plugins](plugins.md)
- [Setting up an IPFS Gateway](https://github.com/ipfs/kubo/blob/master/docs/gateway.md)
- [Reverse proxy setup](production/reverse-proxy.md)
## Other
## Specifications
- [Thanks to all our contributors ❤️](AUTHORS) (We use the `generate-authors.sh` script to regenerate this list.)
- [How to file a GitHub Issue](github-issue-guide.md)
- [Repository structure](specifications/repository.md)
- [Filesystem datastore](specifications/repository_fs.md)
- [Keystore](specifications/keystore.md)
## Examples
- [Kubo as a library](examples/kubo-as-a-library/README.md)

View File

@ -1,102 +1,209 @@
# IPFS : The `Add` command demystified
# How `ipfs add` Works
The goal of this document is to capture the code flow for adding a file (see the `coreapi` package) using the IPFS CLI, in the process exploring some data structures and packages like `ipld.Node` (aka `dagnode`), `FSNode`, `MFS`, etc.
This document explains what happens when you run `ipfs add` to import files into IPFS. Understanding this flow helps when debugging, optimizing imports, or building applications on top of IPFS.
## Concepts
- [Files](https://github.com/ipfs/docs/issues/133)
- [The Big Picture](#the-big-picture)
- [Try It Yourself](#try-it-yourself)
- [Step by Step](#step-by-step)
- [Step 1: Chunking](#step-1-chunking)
- [Step 2: Building the DAG](#step-2-building-the-dag)
- [Step 3: Storing Blocks](#step-3-storing-blocks)
- [Step 4: Pinning](#step-4-pinning)
- [Alternative: Organizing with MFS](#alternative-organizing-with-mfs)
- [Options](#options)
- [UnixFS Format](#unixfs-format)
- [Code Architecture](#code-architecture)
- [Key Files](#key-files)
- [The Adder](#the-adder)
- [Further Reading](#further-reading)
---
## The Big Picture
**Try this yourself**
>
> ```
> # Convert a file to the IPFS format.
> echo "Hello World" > new-file
> ipfs add new-file
> added QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u new-file
> 12 B / 12 B [=========================================================] 100.00%
>
> # Add a file to the MFS.
> NEW_FILE_HASH=$(ipfs add new-file -Q)
> ipfs files cp /ipfs/$NEW_FILE_HASH /new-file
>
> # Get information from the file in MFS.
> ipfs files stat /new-file
> # QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u
> # Size: 12
> # CumulativeSize: 20
> # ChildBlocks: 0
> # Type: file
>
> # Retrieve the contents.
> ipfs files read /new-file
> # Hello World
> ```
When you add a file to IPFS, three main things happen:
## Code Flow
1. **Chunking** - The file is split into smaller pieces
2. **DAG Building** - Those pieces are organized into a tree structure (a [Merkle DAG](https://docs.ipfs.tech/concepts/merkle-dag/))
3. **Pinning** - The root of the tree is pinned so it persists in your local node
**[`UnixfsAPI.Add()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreapi/unixfs.go#L31)** - *Entrypoint into the `Unixfs` package*
The result is a Content Identifier (CID) - a hash that uniquely identifies your content and can be used to retrieve it from anywhere in the IPFS network.
The `UnixfsAPI.Add()` acts on the input data or files, to build a _merkledag_ node (in essence it is the entire tree represented by the root node) and adds it to the _blockstore_.
Within the function, a new `Adder` is created with the configured `Blockstore` and __DAG service__`.
```mermaid
flowchart LR
A["Your File<br/>(bytes)"] --> B["Chunker<br/>(split data)"]
B --> C["DAG Builder<br/>(tree)"]
C --> D["CID<br/>(hash)"]
```
- **[`adder.AddAllAndPin(files)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L403)** - *Entrypoint to the `Add` logic*
encapsulates a lot of the underlying functionality that will be investigated in the following sections.
## Try It Yourself
Our focus will be on the simplest case, a single file, handled by `Adder.addFile(file files.File)`.
```bash
# Add a simple file
echo "Hello World" > hello.txt
ipfs add hello.txt
# added QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u hello.txt
- **[`adder.addFile(file files.File)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L450)** - *Create the _DAG_ and add to `MFS`*
# See what's inside
ipfs cat QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u
# Hello World
The `addFile(file)` method takes the data and converts it into a __DAG__ tree and adds the root of the tree into the `MFS`.
# View the DAG structure
ipfs dag get QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u
```
https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L508-L521
## Step by Step
There are two main methods to focus on -
### Step 1: Chunking
1. **[`adder.add(io.Reader)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L115)** - *Create and return the **root** __DAG__ node*
Big files are split into chunks because:
This method converts the input data (`io.Reader`) to a __DAG__ tree, by splitting the data into _chunks_ using the `Chunker` and organizing them into a __DAG__ (with a *trickle* or *balanced* layout. See [balanced](https://github.com/ipfs/go-unixfs/blob/6b769632e7eb8fe8f302e3f96bf5569232e7a3ee/importer/balanced/builder.go) for more info).
- Large files need to be broken down for efficient transfer
- Identical chunks across files are stored only once (deduplication)
- You can fetch parts of a file without downloading the whole thing
The method returns the **root** `ipld.Node` of the __DAG__.
**Chunking strategies** (set with `--chunker`):
2. **[`adder.addNode(ipld.Node, path)`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L366)** - *Add **root** __DAG__ node to the `MFS`*
| Strategy | Description | Best For |
|----------|-------------|----------|
| `size-N` | Fixed size chunks | General use |
| `rabin` | Content-defined chunks using rolling hash | Deduplication across similar files |
| `buzhash` | Alternative content-defined chunking | Similar to rabin |
Now that we have the **root** node of the `DAG`, this needs to be added to the `MFS` file system.
Fetch (or create, if doesn't already exist) the `MFS` **root** using `mfsRoot()`.
See `ipfs add --help` for current defaults, or [Import](config.md#import) for making them permanent.
> NOTE: The `MFS` **root** is an ephemeral root, created and destroyed solely for the `add` functionality.
Content-defined chunking (rabin/buzhash) finds natural boundaries in the data. This means if you edit the middle of a file, only the changed chunks need to be re-stored - the rest can be deduplicated.
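To see the chunker layer in isolation, here is a minimal sketch using boxo's chunker package; it assumes the `chunk.NewSizeSplitter`/`NextBytes` API exported by `github.com/ipfs/boxo/chunker` and is not the full `ipfs add` pipeline:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	chunk "github.com/ipfs/boxo/chunker"
)

func main() {
	// Stand-in for the file handed to `ipfs add`.
	data := strings.NewReader(strings.Repeat("hello world ", 100_000))

	// Fixed-size chunking, roughly what `--chunker=size-262144` selects.
	splitter := chunk.NewSizeSplitter(data, 256*1024)

	for n := 1; ; n++ {
		c, err := splitter.NextBytes()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("chunk %d: %d bytes\n", n, len(c))
	}
}
```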
Assuming the directory already exists in the MFS file system, (if it doesn't exist it will be created using `mfs.Mkdir()`), the **root** __DAG__ node is added to the `MFS` File system using the `mfs.PutNode()` function.
### Step 2: Building the DAG
- **[MFS] [`PutNode(mfs.Root, path, ipld.Node)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/ops.go#L86)** - *Insert node at path into given `MFS`*
Each chunk becomes a leaf node in a tree. If a file has many chunks, intermediate nodes group them together. This creates a Merkle DAG (Directed Acyclic Graph) where:
The `path` param is used to determine the `MFS Directory`, which is first looked up in the `MFS` using `lookupDir()` function. This is followed by adding the **root** __DAG__ node (`ipld.Node`) into this `Directory` using `directory.AddChild()` method.
- Each node is identified by a hash of its contents
- Parent nodes contain links (hashes) to their children
- The root node's hash becomes the file's CID
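To make the hash-linking concrete, here is a tiny, self-contained sketch (standard library only; deliberately not the real boxo DAG builder or CID format) showing how a parent's identity derives from its children's hashes, so editing any leaf changes the root:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// hashLeaf identifies a chunk by the hash of its bytes.
func hashLeaf(data []byte) [32]byte {
	return sha256.Sum256(data)
}

// hashParent identifies a parent node by hashing the hashes of its
// children (its "links"), which is what makes the structure a Merkle DAG.
func hashParent(children ...[32]byte) [32]byte {
	h := sha256.New()
	for _, c := range children {
		h.Write(c[:])
	}
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	l1 := hashLeaf([]byte("chunk one"))
	l2 := hashLeaf([]byte("chunk two"))
	fmt.Printf("root:            %x\n", hashParent(l1, l2))

	// Edit one chunk: its hash changes, so the root changes too.
	l2edited := hashLeaf([]byte("chunk two, edited"))
	fmt.Printf("root after edit: %x\n", hashParent(l1, l2edited))
}
```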
- **[MFS] Add Child To `UnixFS`**
- **[`directory.AddChild(filename, ipld.Node)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/dir.go#L350)** - *Add **root** __DAG__ node under this directory*
**Layout strategies**:
Within this method the node is added to the `Directory`'s __DAG service__ using the `dserv.Add()` method, followed by adding the **root** __DAG__ node with the given name, in the `directory.addUnixFSChild(directory.child{name, ipld.Node})` method.
**Balanced layout** (default):
- **[MFS] [`directory.addUnixFSChild(child)`](https://github.com/ipfs/go-mfs/blob/v0.1.18/dir.go#L375)** - *Add child to inner UnixFS Directory*
```mermaid
graph TD
Root --> Node1[Node]
Root --> Node2[Node]
Node1 --> Leaf1[Leaf]
Node1 --> Leaf2[Leaf]
Node2 --> Leaf3[Leaf]
```
The node is then added as a child to the inner `UnixFS` directory using the `(BasicDirectory).AddChild()` method.
All leaves at similar depth. Good for random access - you can jump to any part of the file efficiently.
> NOTE: This is not to be confused with the `directory.AddChild(filename, ipld.Node)`, as this operates on the `UnixFS` `BasicDirectory` object.
**Trickle layout** (`--trickle`):
- **[UnixFS] [`(BasicDirectory).AddChild(ctx, name, ipld.Node)`](https://github.com/ipfs/go-unixfs/blob/v1.1.16/io/directory.go#L137)** - *Add child to `BasicDirectory`*
```mermaid
graph TD
Root --> Leaf1[Leaf]
Root --> Node1[Node]
Root --> Node2[Node]
Node1 --> Leaf2[Leaf]
Node2 --> Leaf3[Leaf]
```
> IMPORTANT: It should be noted that the `BasicDirectory` object uses the `ProtoNode` type object which is an implementation of the `ipld.Node` interface, seen and used throughout this document. Ideally the `ipld.Node` should always be used, unless we need access to specific functions from `ProtoNode` (like `Copy()`) that are not available in the interface.
Leaves added progressively. Good for streaming - you can start reading before the whole file is added.
This method first attempts to remove any old links (`ProtoNode.RemoveNodeLink(name)`) to the `ProtoNode` prior to adding a link to the newly added `ipld.Node`, using `ProtoNode.AddNodeLink(name, ipld.Node)`.
### Step 3: Storing Blocks
- **[Merkledag] [`AddNodeLink()`](https://github.com/ipfs/go-merkledag/blob/v1.1.15/node.go#L99)**
As the DAG is built, each node is stored in the blockstore:
The `AddNodeLink()` method is where an `ipld.Link` is created with the `ipld.Node`'s `CID` and size in the `ipld.MakeLink(ipld.Node)` method, and is then appended to the `ProtoNode`'s links in the `ProtoNode.AddRawLink(name)` method.
- **Normal mode**: Data is copied into IPFS's internal storage (`~/.ipfs/blocks/`)
- **Filestore mode** (`--nocopy`): Only references to the original file are stored (saves disk space but the original file must remain in place)
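As a rough illustration of the normal-mode path, a chunk becomes a block that is written to and read back from a blockstore keyed by its CID. This sketch assumes the `blockstore.NewBlockstore` and `blocks.NewBlock` APIs from boxo and go-block-format; Kubo's real repo wiring (flatfs, caching) is more involved:

```go
package main

import (
	"context"
	"fmt"

	blockstore "github.com/ipfs/boxo/blockstore"
	blocks "github.com/ipfs/go-block-format"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	ctx := context.Background()

	// In-memory datastore standing in for the repo's block storage.
	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	// One chunk becomes one block, addressed by the hash of its bytes.
	blk := blocks.NewBlock([]byte("Hello World\n"))
	if err := bs.Put(ctx, blk); err != nil {
		panic(err)
	}

	got, err := bs.Get(ctx, blk.Cid())
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored %s (%d bytes)\n", got.Cid(), len(got.RawData()))
}
```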
- **[`adder.Finalize()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L200)** - *Fetch and return the __DAG__ **root** from the `MFS` and `UnixFS` directory*
### Step 4: Pinning
The `Finalize` method returns the `ipld.Node` from the `UnixFS` `Directory`.
By default, added content is pinned (`ipfs add --pin=true`). This tells your IPFS node to keep this data - without pinning, content may eventually be removed to free up space.
- **[`adder.PinRoot()`](https://github.com/ipfs/go-ipfs/blob/v0.4.18/core/coreunix/add.go#L171)** - *Pin all files under the `MFS` **root***
### Alternative: Organizing with MFS
The whole process ends with `PinRoot` recursively pinning all the files under the `MFS` **root**
Instead of pinning, you can use the [Mutable File System (MFS)](https://docs.ipfs.tech/concepts/file-systems/#mutable-file-system-mfs) to organize content using familiar paths like `/photos/vacation.jpg` instead of raw CIDs:
```bash
# Add directly to MFS path
ipfs add --to-files=/backups/ myfile.txt
# Or copy an existing CID into MFS
ipfs files cp /ipfs/QmWATWQ7fVPP2EFGu71UkfnqhYXDYH566qy47CnJDgvs8u /docs/hello.txt
```
Content in MFS is implicitly pinned and stays organized across node restarts.
## Options
Run `ipfs add --help` to see all available options for controlling chunking, DAG layout, CID format, pinning behavior, and more.
## UnixFS Format
IPFS uses [UnixFS](https://specs.ipfs.tech/unixfs/) to represent files and directories. UnixFS is an abstraction layer that:
- Gives names to raw data blobs (so you can have `/foo/bar.txt` instead of just hashes)
- Represents directories as lists of named links to other nodes
- Organizes large files as trees of smaller chunks
- Makes these structures cryptographically verifiable - any tampering is detectable because it would change the hashes
With `--raw-leaves`, leaf nodes store raw data without the UnixFS wrapper. This is more efficient and is the default when using CIDv1.
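As an illustration, a raw leaf is simply the chunk bytes addressed with the `raw` codec. The sketch below computes such a CID with go-cid and go-multihash directly (an assumption for illustration, not the adder's actual code path):

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	chunkData := []byte("Hello World\n")

	// CIDv1 + `raw` codec: the block is the bytes themselves, no dag-pb wrapper.
	prefix := cid.Prefix{
		Version:  1,
		Codec:    cid.Raw,
		MhType:   mh.SHA2_256,
		MhLength: -1, // default length for the chosen hash
	}
	c, err := prefix.Sum(chunkData)
	if err != nil {
		panic(err)
	}
	fmt.Println(c) // prints a bafkrei... CID, like --raw-leaves produces
}
```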
## Code Architecture
The add flow spans several layers:
```mermaid
flowchart TD
subgraph CLI ["CLI Layer (kubo)"]
A["core/commands/add.go<br/>parses flags, shows progress"]
end
subgraph API ["CoreAPI Layer (kubo)"]
B["core/coreapi/unixfs.go<br/>UnixfsAPI.Add() entry point"]
end
subgraph Adder ["Adder (kubo)"]
C["core/coreunix/add.go<br/>orchestrates chunking, DAG building, MFS, pinning"]
end
subgraph Boxo ["boxo libraries"]
D["chunker/ - splits data into chunks"]
E["ipld/unixfs/ - DAG layout and UnixFS format"]
F["mfs/ - mutable filesystem abstraction"]
G["pinning/ - pin management"]
H["blockstore/ - block storage"]
end
A --> B --> C --> Boxo
```
### Key Files
| Component | Location |
|-----------|----------|
| CLI command | `core/commands/add.go` |
| API implementation | `core/coreapi/unixfs.go` |
| Adder logic | `core/coreunix/add.go` |
| Chunking | [boxo/chunker](https://github.com/ipfs/boxo/tree/main/chunker) |
| DAG layouts | [boxo/ipld/unixfs/importer](https://github.com/ipfs/boxo/tree/main/ipld/unixfs/importer) |
| MFS | [boxo/mfs](https://github.com/ipfs/boxo/tree/main/mfs) |
| Pinning | [boxo/pinning/pinner](https://github.com/ipfs/boxo/tree/main/pinning/pinner) |
### The Adder
The `Adder` type in `core/coreunix/add.go` is the workhorse. It:
1. **Creates an MFS root** - temporary in-memory filesystem for building the DAG
2. **Processes files recursively** - chunks each file and builds DAG nodes
3. **Commits to blockstore** - persists all blocks
4. **Pins the result** - keeps content from being removed
5. **Returns the root CID**
Key methods:
- `AddAllAndPin()` - main entry point
- `addFileNode()` - handles a single file or directory
- `add()` - chunks data and builds the DAG using boxo's layout builders
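For applications that want this flow without shelling out to the CLI, the same adder is reachable through the CoreAPI. The sketch below assumes Kubo's RPC client (`github.com/ipfs/kubo/client/rpc`) and boxo's `files` package; option names and import paths have moved between releases, so treat it as illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/boxo/files"
	rpc "github.com/ipfs/kubo/client/rpc"
)

func main() {
	ctx := context.Background()

	// Connect to a locally running Kubo daemon over its RPC API.
	api, err := rpc.NewLocalApi()
	if err != nil {
		panic(err)
	}

	// Roughly `ipfs add` of a small in-memory file: chunk the data,
	// build the DAG, and store the blocks. Pinning and other behavior
	// can be requested via Unixfs add options.
	f := files.NewBytesFile([]byte("Hello World\n"))
	p, err := api.Unixfs().Add(ctx, f)
	if err != nil {
		panic(err)
	}
	fmt.Println("added:", p) // immutable /ipfs/<cid> path of the root node
}
```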
## Further Reading
- [UnixFS specification](https://specs.ipfs.tech/unixfs/)
- [IPLD and Merkle DAGs](https://docs.ipfs.tech/concepts/merkle-dag/)
- [Pinning](https://docs.ipfs.tech/concepts/persistence/)
- [MFS (Mutable File System)](https://docs.ipfs.tech/concepts/file-systems/#mutable-file-system-mfs)

View File

@ -265,11 +265,11 @@ since Kubo 0.13, but in this release it will also include the size column.
#### QUIC and WebTransport
##### WebTransport enabled by default
[WebTransport](https://docs.libp2p.io/concepts/transports/webtransport/) is a new libp2p transport that [was introduced in v0.16](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.16.md#-webtransport-new-experimental-transport) that is based on top of QUIC and HTTP3.
[WebTransport](https://web.archive.org/web/20260128152314/https://docs.libp2p.io/concepts/transports/webtransport/) is a new libp2p transport that [was introduced in v0.16](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.16.md#-webtransport-new-experimental-transport) that is based on top of QUIC and HTTP3.
This allows browser-based nodes to contact Kubo nodes, so now instead of just serving requests for other system-level application nodes, you can also serve requests directly to a node running inside a browser page.
For the full story see [connectivity.libp2p.io](https://connectivity.libp2p.io/).
For the full story see [connectivity.libp2p.io](https://web.archive.org/web/20251118040510/https://connectivity.libp2p.io/).
##### QUIC and WebTransport share a single port
WebTransport is enabled by default in part because [go-libp2p now supports running WebTransport and QUIC transports on the same QUIC listener](https://github.com/libp2p/go-libp2p/issues/1759). No additional port needs to be opened.

View File

@ -78,7 +78,7 @@ introduced in [`go-libp2p`](https://github.com/libp2p/go-libp2p/releases/tag/v0.
> allows browser nodes to connect to go-libp2p nodes directly,
> without any configuration (e.g. TLS certificates) needed on the go-libp2p
> side. This is useful for browser nodes that aren't able to use
> [WebTransport](https://blog.libp2p.io/2022-12-19-libp2p-webtransport/).
> [WebTransport](https://web.archive.org/web/20260107053250/https://blog.libp2p.io/2022-12-19-libp2p-webtransport/).
The `/webrtc-direct` transport is disabled by default in Kubo 0.24,
and not ready for production use yet, but we plan to enable it in a future release.

View File

@ -50,7 +50,7 @@ AutoTLS will remain disabled under the following conditions:
To troubleshoot, use `GOLOG_LOG_LEVEL="error,autotls=info"`.
For more details, check out the [`AutoTLS` configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) or dive deeper with [AutoTLS libp2p blog post](https://blog.libp2p.io/autotls/).
For more details, check out the [`AutoTLS` configuration documentation](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotls) or dive deeper with [AutoTLS libp2p blog post](https://web.archive.org/web/20260112031855/https://blog.libp2p.io/autotls/).
#### New WebUI features

View File

@ -10,9 +10,26 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
- [Overview](#overview)
- [🔦 Highlights](#-highlights)
- [🔢 UnixFS CID Profiles (IPIP-499)](#-unixfs-cid-profiles-ipip-499)
- [🧹 Automatic cleanup of interrupted imports](#-automatic-cleanup-of-interrupted-imports)
- [Routing V1 HTTP API now exposed by default](#routing-v1-http-api-now-exposed-by-default)
- [Track total size when adding pins](#track-total-size-when-adding-pins)
- [IPIP-0524: Gateway codec conversion disabled by default](#ipip-0524-gateway-codec-conversion-disabled-by-default)
- [IPIP-523: `?format=` takes precedence over `Accept` header](#ipip-523-format-takes-precedence-over-accept-header)
- [IPIP-524: Gateway codec conversion disabled by default](#ipip-524-gateway-codec-conversion-disabled-by-default)
- [Improved IPNS over PubSub validation](#improved-ipns-over-pubsub-validation)
- [New `ipfs diag datastore` commands](#new-ipfs-diag-datastore-commands)
- [🚇 Improved `ipfs p2p` tunnels with foreground mode](#-improved-ipfs-p2p-tunnels-with-foreground-mode)
- [Improved `ipfs dag stat` output](#improved-ipfs-dag-stat-output)
- [🔑 `ipfs key` improvements](#-ipfs-key-improvements)
- [Accelerated DHT Client and Provide Sweep now work together](#accelerated-dht-client-and-provide-sweep-now-work-together)
- [🌐 No unnecessary DNS lookups for AutoTLS addresses](#-no-unnecessary-dns-lookups-for-autotls-addresses)
- [⏱️ Configurable gateway request duration limit](#-configurable-gateway-request-duration-limit)
- [🔧 Recovery from corrupted MFS root](#-recovery-from-corrupted-mfs-root)
- [📡 RPC `Content-Type` headers for binary responses](#-rpc-content-type-headers-for-binary-responses)
- [🔖 New `ipfs name get|put` commands](#-new-ipfs-name-getput-commands)
- [📋 Long listing format for `ipfs ls`](#-long-listing-format-for-ipfs-ls)
- [🖥️ WebUI Improvements](#-webui-improvements)
- [📦️ Dependency updates](#-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
@ -20,6 +37,47 @@ This release was brought to you by the [Shipyard](https://ipshipyard.com/) team.
### 🔦 Highlights
#### 🔢 UnixFS CID Profiles (IPIP-499)
[IPIP-499](https://github.com/ipfs/specs/pull/499) CID Profiles are presets that pin down how files get split into blocks and organized into directories. Useful when you need the same CID for the same data across different software or versions.
**New configuration [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles)**
- `unixfs-v1-2025`: modern CIDv1 profile with improved defaults
- `unixfs-v0-2015` (alias `legacy-cid-v0`): best-effort legacy CIDv0 behavior
Apply with: `ipfs config profile apply unixfs-v1-2025`
The `test-cid-v1` and `test-cid-v1-wide` profiles have been removed. Use `unixfs-v1-2025` or manually set specific `Import.*` settings instead.
**New [`Import.*`](https://github.com/ipfs/kubo/blob/master/docs/config.md#import) options**
- `Import.UnixFSHAMTDirectorySizeEstimation`: estimation mode (`links`, `block`, or `disabled`)
- `Import.UnixFSDAGLayout`: DAG layout (`balanced` or `trickle`)
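Both options can also be set directly with `ipfs config` if you prefer not to apply a full profile; a minimal sketch (the values shown are only examples):
```console
$ ipfs config Import.UnixFSHAMTDirectorySizeEstimation block
$ ipfs config Import.UnixFSDAGLayout trickle
```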
**New [`ipfs add`](https://docs.ipfs.tech/reference/kubo/cli/#ipfs-add) CLI flags**
- `--dereference-symlinks` resolves all symlinks to their target content, replacing the deprecated `--dereference-args` which only resolved CLI argument symlinks
- `--empty-dirs` / `-E` controls inclusion of empty directories (default: true)
- `--hidden` / `-H` includes hidden files (default: false)
- `--trickle`: the implicit default layout used when this flag is not passed can now be adjusted via `Import.UnixFSDAGLayout`
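For example, the new flags can be combined in a single import; a sketch assuming a local directory `./site` (placeholder path):
```console
$ ipfs add -r --hidden --dereference-symlinks ./site
```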
**`ipfs files write` fix for CIDv1 directories**
When writing to MFS directories that use CIDv1 (via `--cid-version=1` or `ipfs files chcid`), single-block files now produce raw block CIDs (like `bafkrei...`), matching the behavior of `ipfs add --raw-leaves`. Previously, MFS would wrap single-block files in dag-pb even when raw leaves were enabled. CIDv0 directories continue to use dag-pb.
**HAMT Threshold Fix**
HAMT directory sharding threshold changed from `>=` to `>` to match the Go docs and JS implementation ([ipfs/boxo@6707376](https://github.com/ipfs/boxo/commit/6707376002a3d4ba64895749ce9be2e00d265ed5)). A directory exactly at 256 KiB now stays as a basic directory instead of converting to HAMT. This is a theoretical breaking change, but unlikely to impact real-world users as it requires a directory to be exactly at the threshold boundary. If you depend on the old behavior, adjust [`Import.UnixFSHAMTShardingSize`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfshamtshardingsize) to be 1 byte lower.
#### 🧹 Automatic cleanup of interrupted imports
If you cancel `ipfs add` or `ipfs dag import` mid-operation, Kubo now automatically cleans up incomplete data on the next daemon start. Previously, interrupted imports would leave orphan blocks in your repository; because they were not pinned, they were difficult to identify and could only be removed by running explicit garbage collection.
Batch operations also use less memory now. Block data is written to disk immediately rather than held in RAM until the batch commits.
Under the hood, the block storage layer (flatfs) was rewritten to use atomic batch operations via a temporary staging directory. See [go-ds-flatfs#142](https://github.com/ipfs/go-ds-flatfs/pull/142) for details.
#### Routing V1 HTTP API now exposed by default
The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) is now exposed by default at `http://127.0.0.1:8080/routing/v1`. This allows light clients in browsers to use Kubo Gateway as a delegated routing backend instead of running a full DHT client. Support for [IPIP-476: Delegated Routing DHT Closest Peers API](https://github.com/ipfs/specs/pull/476) is included. Can be disabled via [`Gateway.ExposeRoutingAPI`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayexposeroutingapi).
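For example, with the daemon running locally, the endpoint can be queried with plain HTTP; a sketch using a placeholder CID:
```console
$ curl -s -H "Accept: application/json" \
    "http://127.0.0.1:8080/routing/v1/providers/<cid>"
```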
@ -29,13 +87,22 @@ The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) is n
Adds total-size progress tracking of pinned nodes during `ipfs pin add --progress`. The output now shows the total size of the pinned DAG.
Example output:
```
Fetched/Processed 336 nodes (83 MB)
```
#### IPIP-0524: Gateway codec conversion disabled by default
#### IPIP-523: `?format=` takes precedence over `Accept` header
Codec conversion is now disabled by default per [IPIP-0524](https://github.com/ipfs/specs/pull/524).
[IPIP-523](https://github.com/ipfs/specs/pull/523) simplifies HTTP content negotiation by making the `?format=` URL query parameter always take priority over the `Accept` HTTP header when both are present.
This ensures deterministic HTTP caching behavior and protects against CDNs that commingle different response types under the same cache key. It also allows browsers to use `?format=` reliably even when they send `Accept` headers with specific content types.
The only breaking change is for edge cases where a client sends both a specific `Accept` header and a different `?format=` value for an explicitly supported format (`tar`, `raw`, `car`, `dag-json`, `dag-cbor`, etc.). Previously `Accept` would win. Now `?format=` always wins.
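As a hedged illustration (placeholder CID, local gateway assumed): both negotiation signals are sent, and the response follows `?format=raw` rather than the `Accept` header:
```console
$ curl -H "Accept: application/vnd.ipld.car" \
    "http://127.0.0.1:8080/ipfs/<cid>?format=raw" -o block.bin
```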
#### IPIP-524: Gateway codec conversion disabled by default
Codec conversion is now disabled by default per [IPIP-524](https://github.com/ipfs/specs/pull/524).
Requests for a format that differs from the block's codec will return `406 Not Acceptable`.
**Migration**: Clients should fetch raw blocks (`?format=raw` or `Accept: application/vnd.ipld.raw`)
@ -45,6 +112,171 @@ This change removes gateways from a gatekeeping role: new codecs can now be adop
immediately without waiting for gateway operator updates. Set [`Gateway.AllowCodecConversion`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayallowcodecconversion)
to `true` to restore previous behavior.
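A sketch of both sides of the migration (placeholder CID; the config change only applies to gateways you operate):
```console
# Client: fetch the raw block and decode locally
$ curl "http://127.0.0.1:8080/ipfs/<cid>?format=raw" -o block.bin
# Gateway operator: restore the previous conversion behavior
$ ipfs config --json Gateway.AllowCodecConversion true
```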
#### Improved IPNS over PubSub validation
The [IPNS over PubSub](https://specs.ipfs.tech/ipns/ipns-pubsub-router/) implementation in Kubo is now more reliable. Duplicate messages are rejected even in large networks where messages may cycle back after the in-memory cache expires.
Kubo now persists the maximum seen sequence number per peer to the datastore ([go-libp2p-pubsub#BasicSeqnoValidator](https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub#BasicSeqnoValidator)), providing stronger duplicate detection that survives node restarts. This addresses message flooding issues reported in [#9665](https://github.com/ipfs/kubo/issues/9665).
Kubo's pubsub is optimized for the IPNS use case. For custom pubsub applications requiring different validation logic, use [go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub) directly in a dedicated binary.
#### New `ipfs diag datastore` commands
New experimental commands for low-level datastore inspection:
- `ipfs diag datastore get <key>` - Read raw value at a datastore key (use `--hex` for hex dump)
- `ipfs diag datastore count <prefix>` - Count entries matching a datastore prefix
The daemon must not be running when using these commands. Run `ipfs diag datastore --help` for usage examples.
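For example, with the daemon stopped (the key and prefix below are only illustrative):
```console
$ ipfs diag datastore get /local/filesroot --hex
$ ipfs diag datastore count /blocks
```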
#### 🚇 Improved `ipfs p2p` tunnels with foreground mode
P2P tunnels can now run like SSH port forwarding: start a tunnel, use it, and it cleans up automatically when you're done.
The new `--foreground` (`-f`) flag for `ipfs p2p listen` and `ipfs p2p forward` keeps the command running until interrupted. When you Ctrl+C, send SIGTERM, or stop the service, the tunnel is removed automatically:
```console
$ ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 --foreground
Listening on /x/ssh, forwarding to /ip4/127.0.0.1/tcp/22, waiting for interrupt...
^C
Received interrupt, removing listener for /x/ssh
```
Without `--foreground`, commands return immediately and tunnels persist until explicitly closed (existing behavior).
See [docs/p2p-tunnels.md](https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md) for usage examples.
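The flag works the same on the dialing side with `ipfs p2p forward`; a sketch with placeholder PeerID and username:
```console
$ ipfs p2p forward /x/ssh /ip4/127.0.0.1/tcp/2222 /p2p/<peer-id> --foreground
# in another terminal:
$ ssh -p 2222 user@127.0.0.1
```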
#### Improved `ipfs dag stat` output
The `ipfs dag stat` command has been improved for better terminal UX:
- Progress output now uses a single line with carriage return, avoiding terminal flooding
- Progress is auto-detected: shown only in interactive terminals by default
- Human-readable sizes are now displayed alongside raw byte counts
Example progress (interactive terminal):
```
Fetched/Processed 84 blocks, 2097152 bytes (2.1 MB)
```
Example summary output:
```
Summary
Total Size: 2097152 (2.1 MB)
Unique Blocks: 42
Shared Size: 1048576 (1.0 MB)
Ratio: 1.500000
```
Use `--progress=true` to force progress even when piped, or `--progress=false` to disable it.
#### 🔑 `ipfs key` improvements
`ipfs key ls` is now the canonical command for listing keys, matching `ipfs pin ls` and `ipfs files ls`. The old `ipfs key list` still works but is deprecated.
Listing also became more resilient: bad keys are now skipped with an error log instead of failing the entire operation.
#### Accelerated DHT Client and Provide Sweep now work together
Previously, provide operations could start before the Accelerated DHT Client discovered enough peers, causing sweep mode to lose its efficiency benefits. Now providing waits for the initial network crawl (about 10 minutes), so your content is properly distributed across DHT regions once the initial DHT map is created. Check `ipfs provide stat` to see when providing begins.
#### 🌐 No unnecessary DNS lookups for AutoTLS addresses
Kubo no longer makes DNS queries for [AutoTLS](https://web.archive.org/web/20260112031855/https://blog.libp2p.io/autotls/) addresses like `1-2-3-4.peerid.libp2p.direct`. Since the IP is encoded in the hostname (`1-2-3-4` means `1.2.3.4`), Kubo extracts it locally. This reduces load on the public-good DNS servers at `libp2p.direct` run by [Shipyard](https://ipshipyard.com), reserving them for web browsers, which lack direct DNS access and must rely on their built-in resolvers.
To disable this local resolution and always use network DNS, set [`AutoTLS.SkipDNSLookup`](https://github.com/ipfs/kubo/blob/master/docs/config.md#autotlsskipdnslookup) to `false`.
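For example (a sketch):
```console
$ ipfs config --json AutoTLS.SkipDNSLookup false
```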
#### ⏱️ Configurable gateway request duration limit
[`Gateway.MaxRequestDuration`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaymaxrequestduration) sets an absolute deadline for gateway requests. Unlike `RetrievalTimeout` (which resets on each data write and catches stalled transfers), this is a hard limit on the total time a request can take.
The default 1 hour limit (previously hardcoded) can now be adjusted to fit your deployment needs. This is a fallback that prevents requests from hanging indefinitely when subsystem timeouts are misconfigured or fail to trigger. Returns 504 Gateway Timeout when exceeded.
#### 🔧 Recovery from corrupted MFS root
If your daemon fails to start because the MFS root is not a directory (due to misconfiguration, operational error, or disk corruption), you can now recover without deleting and recreating your repository in a new `IPFS_PATH`.
The new `ipfs files chroot` command lets you reset the MFS (Mutable File System) root or restore it to a known valid CID:
```console
# Reset MFS to an empty directory
$ ipfs files chroot --confirm
# Or restore from a previously saved directory CID
$ ipfs files chroot --confirm QmYourBackupCID
```
See `ipfs files chroot --help` for details.
#### 📡 RPC `Content-Type` headers for binary responses
HTTP RPC endpoints that return binary data now set appropriate `Content-Type` headers, making it easier to integrate with HTTP clients and tooling that rely on MIME types. On the CLI these commands behave the same as before, but over HTTP RPC you now get proper headers:
| Endpoint | Content-Type |
|------------------------|-------------------------------------------|
| `/api/v0/get` | `application/x-tar` or `application/gzip` |
| `/api/v0/dag/export` | `application/vnd.ipld.car` |
| `/api/v0/block/get` | `application/vnd.ipld.raw` |
| `/api/v0/name/get` | `application/vnd.ipfs.ipns-record` |
| `/api/v0/diag/profile` | `application/zip` |
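A quick way to observe the new headers is to dump them with curl; a sketch assuming the default RPC port and a placeholder CID:
```console
$ curl -sD - -X POST "http://127.0.0.1:5001/api/v0/block/get?arg=<cid>" -o block.bin
```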
#### 🔖 New `ipfs name get|put` commands
You can now back up, restore, and share IPNS records without needing the private key.
```console
$ ipfs name get /ipns/k51... > record.bin
$ ipfs name get /ipns/k51... | ipfs name inspect
$ ipfs name put k51... record.bin
```
These are low-level tools primarily for debugging and testing IPNS.
The `put` command validates records by default. Use `--force` to skip validation and test how routing systems handle malformed or outdated records. Note that `--force` only bypasses this command's checks; the routing system may still reject invalid records.
#### 📋 Long listing format for `ipfs ls`
The `ipfs ls` command now supports the `--long` (`-l`) flag for displaying Unix-style file permissions and modification times. This works with files added using `--preserve-mode` and `--preserve-mtime`. See `ipfs ls --help` for format details and examples.
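For example (a sketch; `./dir` and the CID are placeholders):
```console
$ ipfs add -r --preserve-mode --preserve-mtime ./dir
$ ipfs ls -l <cid>
```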
#### 🖥️ WebUI Improvements
IPFS Web UI has been updated to [v4.11.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.11.0).
##### Search and filter files
You can now search and filter files directly in the Files screen. Type a name, CID, or file extension and the list narrows down in real time. Works in both list and grid view.
> ![Search and filter files](https://github.com/user-attachments/assets/cc266dbc-8424-4a8a-a6b7-a80a5a25683b)
##### DHT Provide diagnostic screen
New screen under Diagnostics that shows the health of DHT Provide operations. You can see reprovide cycle progress, worker utilization, queue status, and network throughput at a glance, without having to use the [`ipfs provide stat`](https://docs.ipfs.tech/reference/kubo/cli/#ipfs-provide-stat) CLI.
> ![DHT Provide diagnostic screen](https://github.com/user-attachments/assets/c577309a-2249-46f8-87d9-f0da42955f32)
##### Better path handling in Files
The Inspect button now resolves `/ipfs/` and `/ipns/` paths to their final CID before opening the IPLD Explorer. The Explore form also accepts `ipfs://` and `ipns://` protocol URLs. Previously, these would show a blank screen or an infinite spinner. Path resolution errors now also show better error pages:
> ![Better path handling in Files](https://github.com/user-attachments/assets/3494835b-0b93-4990-9971-078273671928)
#### 📦️ Dependency updates
- update `go-libp2p` to [v0.47.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.47.0) (incl. [v0.46.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.46.0))
- Reduced WebRTC log noise by using debug level for pion errors ([go-libp2p#3426](https://github.com/libp2p/go-libp2p/pull/3426)).
- Fixed mDNS discovery on Windows and macOS by filtering addresses to reduce packet size ([go-libp2p#3434](https://github.com/libp2p/go-libp2p/pull/3434)).
- AutoTLS addresses no longer get marked unreachable when peers lack WebSockets support. Swarm heals over time ([go-libp2p#3435](https://github.com/libp2p/go-libp2p/pull/3435)).
- Fixed `stream.Close()` blocking indefinitely on unresponsive peers ([go-libp2p#3448](https://github.com/libp2p/go-libp2p/pull/3448)).
- update `quic-go` to [v0.59.0](https://github.com/quic-go/quic-go/releases/tag/v0.59.0) (incl. [v0.58.0](https://github.com/quic-go/quic-go/releases/tag/v0.58.0) + [v0.57.0](https://github.com/quic-go/quic-go/releases/tag/v0.57.0))
- update `p2p-forge` to [v0.7.0](https://github.com/ipshipyard/p2p-forge/releases/tag/v0.7.0)
- update `go-ds-pebble` to [v0.5.9](https://github.com/ipfs/go-ds-pebble/releases/tag/v0.5.9)
- updates `github.com/cockroachdb/pebble` to [v2.1.4](https://github.com/cockroachdb/pebble/releases/tag/v2.1.4) to enable Go 1.26 support
- update `go-libp2p-pubsub` to [v0.15.0](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.15.0)
- update `boxo` to [v0.36.0](https://github.com/ipfs/boxo/releases/tag/v0.36.0)
- update `go-libp2p-kad-dht` to [v0.37.1](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.37.1) (includes [v0.37.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.37.0))
- update `ipfs-webui` to [v4.11.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.11.0)
- update `gateway-conformance` tests to [v0.9](https://github.com/ipfs/gateway-conformance/releases/tag/v0.9.0)
### 📝 Changelog
### 👨‍👩‍👧‍👦 Contributors

View File

@ -59,6 +59,7 @@ config file at runtime.
- [`Discovery.MDNS.Enabled`](#discoverymdnsenabled)
- [`Discovery.MDNS.Interval`](#discoverymdnsinterval)
- [`Experimental`](#experimental)
- [`Experimental.Libp2pStreamMounting`](#experimentallibp2pstreammounting)
- [`Gateway`](#gateway)
- [`Gateway.NoFetch`](#gatewaynofetch)
- [`Gateway.NoDNSLink`](#gatewaynodnslink)
@ -67,6 +68,7 @@ config file at runtime.
- [`Gateway.DisableHTMLErrors`](#gatewaydisablehtmlerrors)
- [`Gateway.ExposeRoutingAPI`](#gatewayexposeroutingapi)
- [`Gateway.RetrievalTimeout`](#gatewayretrievaltimeout)
- [`Gateway.MaxRequestDuration`](#gatewaymaxrequestduration)
- [`Gateway.MaxRangeRequestFileSize`](#gatewaymaxrangerequestfilesize)
- [`Gateway.MaxConcurrentRequests`](#gatewaymaxconcurrentrequests)
- [`Gateway.HTTPHeaders`](#gatewayhttpheaders)
@ -145,6 +147,8 @@ config file at runtime.
- [`Provider.Strategy`](#providerstrategy)
- [`Provider.WorkerCount`](#providerworkercount)
- [`Pubsub`](#pubsub)
- [When to use a dedicated pubsub node](#when-to-use-a-dedicated-pubsub-node)
- [Message deduplication](#message-deduplication)
- [`Pubsub.Enabled`](#pubsubenabled)
- [`Pubsub.Router`](#pubsubrouter)
- [`Pubsub.DisableSigning`](#pubsubdisablesigning)
@ -157,14 +161,14 @@ config file at runtime.
- [`Reprovider.Strategy`](#providestrategy)
- [`Routing`](#routing)
- [`Routing.Type`](#routingtype)
- [`Routing.DelegatedRouters`](#routingdelegatedrouters)
- [`Routing.AcceleratedDHTClient`](#routingaccelerateddhtclient)
- [`Routing.LoopbackAddressesOnLanDHT`](#routingloopbackaddressesonlandht)
- [`Routing.IgnoreProviders`](#routingignoreproviders)
- [`Routing.DelegatedRouters`](#routingdelegatedrouters)
- [`Routing.Routers`](#routingrouters)
- [`Routing.Routers: Type`](#routingrouters-type)
- [`Routing.Routers: Parameters`](#routingrouters-parameters)
- [`Routing: Methods`](#routing-methods)
- [`Routing.Routers.[name].Type`](#routingroutersnametype)
- [`Routing.Routers.[name].Parameters`](#routingroutersnameparameters)
- [`Routing.Methods`](#routingmethods)
- [`Swarm`](#swarm)
- [`Swarm.AddrFilters`](#swarmaddrfilters)
- [`Swarm.DisableBandwidthMetrics`](#swarmdisablebandwidthmetrics)
@ -239,6 +243,8 @@ config file at runtime.
- [`Import.UnixFSDirectoryMaxLinks`](#importunixfsdirectorymaxlinks)
- [`Import.UnixFSHAMTDirectoryMaxFanout`](#importunixfshamtdirectorymaxfanout)
- [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold)
- [`Import.UnixFSHAMTDirectorySizeEstimation`](#importunixfshamtdirectorysizeestimation)
- [`Import.UnixFSDAGLayout`](#importunixfsdaglayout)
- [`Version`](#version)
- [`Version.AgentSuffix`](#versionagentsuffix)
- [`Version.SwarmCheckEnabled`](#versionswarmcheckenabled)
@ -260,9 +266,9 @@ config file at runtime.
- [`lowpower` profile](#lowpower-profile)
- [`announce-off` profile](#announce-off-profile)
- [`announce-on` profile](#announce-on-profile)
- [`unixfs-v0-2015` profile](#unixfs-v0-2015-profile)
- [`legacy-cid-v0` profile](#legacy-cid-v0-profile)
- [`test-cid-v1` profile](#test-cid-v1-profile)
- [`test-cid-v1-wide` profile](#test-cid-v1-wide-profile)
- [`unixfs-v1-2025` profile](#unixfs-v1-2025-profile)
- [Security](#security)
- [Port and Network Exposure](#port-and-network-exposure)
- [Security Best Practices](#security-best-practices)
@ -714,7 +720,7 @@ Type: `flag`
## `AutoTLS`
The [AutoTLS](https://blog.libp2p.io/autotls/) feature enables publicly reachable Kubo nodes (those dialable from the public
The [AutoTLS](https://web.archive.org/web/20260112031855/https://blog.libp2p.io/autotls/) feature enables publicly reachable Kubo nodes (those dialable from the public
internet) to automatically obtain a wildcard TLS certificate for a DNS name
unique to their PeerID at `*.[PeerID].libp2p.direct`. This enables direct
libp2p connections and retrieval of IPFS content from browsers [Secure Context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts)
@ -722,7 +728,7 @@ using transports such as [Secure WebSockets](https://github.com/libp2p/specs/blo
without requiring user to do any manual domain registration and certificate configuration.
Under the hood, [p2p-forge] client uses public utility service at `libp2p.direct` as an [ACME DNS-01 Challenge](https://letsencrypt.org/docs/challenge-types/#dns-01-challenge)
broker enabling peer to obtain a wildcard TLS certificate tied to public key of their [PeerID](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id).
broker enabling peer to obtain a wildcard TLS certificate tied to public key of their [PeerID](https://web.archive.org/web/20251112181025/https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id).
By default, the certificates are requested from Let's Encrypt. Origin and rationale for this project can be found in [community.letsencrypt.org discussion](https://community.letsencrypt.org/t/feedback-on-raising-certificates-per-registered-domain-to-enable-peer-to-peer-networking/223003).
@ -776,6 +782,22 @@ Default: `true`
Type: `flag`
### `AutoTLS.SkipDNSLookup`
Optional. Controls whether to skip network DNS lookups for [p2p-forge] domains like `*.libp2p.direct`.
This applies to DNS resolution performed via [`DNS.Resolvers`](#dnsresolvers), including `/dns*` multiaddrs resolved by go-libp2p (e.g., peer addresses from DHT or delegated routing).
When enabled (default), A/AAAA queries for hostnames matching [`AutoTLS.DomainSuffix`](#autotlsdomainsuffix) are resolved locally by parsing the IP address directly from the hostname (e.g., `1-2-3-4.peerID.libp2p.direct` resolves to `1.2.3.4` without network I/O). This avoids unnecessary DNS queries since the IP is already encoded in the hostname.
If the hostname format is invalid (wrong peerID, malformed IP encoding), the resolver falls back to network DNS, ensuring forward compatibility with potential future DNS record types.
Set to `false` to always use network DNS for these domains. This is primarily useful for debugging or if you need to override resolution behavior via [`DNS.Resolvers`](#dnsresolvers).
Default: `true`
Type: `flag`
### `AutoTLS.DomainSuffix`
Optional override of the parent domain suffix that will be used in DNS+TLS+WebSockets multiaddrs generated by [p2p-forge] client.
@ -1070,11 +1092,26 @@ in the [new mDNS implementation](https://github.com/libp2p/zeroconf#readme).
Toggle and configure experimental features of Kubo. Experimental features are listed [here](./experimental-features.md).
### `Experimental.Libp2pStreamMounting`
Enables the `ipfs p2p` commands for tunneling TCP connections through libp2p
streams, similar to SSH port forwarding.
See [docs/p2p-tunnels.md](p2p-tunnels.md) for usage examples.
Default: `false`
Type: `bool`
## `Gateway`
Options for the HTTP gateway.
**NOTE:** support for `/api/v0` under the gateway path is now deprecated. It will be removed in future versions: <https://github.com/ipfs/kubo/issues/10312>.
> [!IMPORTANT]
> By default, Kubo's gateway is configured for local use at `127.0.0.1` and `localhost`.
> To run a public gateway, configure your domain names in [`Gateway.PublicGateways`](#gatewaypublicgateways).
> For production deployment considerations (reverse proxy, timeouts, rate limiting, CDN),
> see [Running in Production](gateway.md#running-in-production).
### `Gateway.NoFetch`
@ -1113,7 +1150,7 @@ dag-pb or dag-cbor to dag-json).
When disabled (the default), the gateway returns `406 Not Acceptable` for
codec mismatches, following behavior specified in
[IPIP-0524](https://github.com/ipfs/specs/pull/524).
[IPIP-524](https://github.com/ipfs/specs/pull/524).
**Recommended approach**: Instead of relying on gateway-side conversion,
fetch the raw block using `?format=raw` (`application/vnd.ipld.raw`) and
@ -1185,6 +1222,16 @@ Default: `30s`
Type: `optionalDuration`
### `Gateway.MaxRequestDuration`
An absolute deadline for the entire gateway request. Unlike [`RetrievalTimeout`](#gatewayretrievaltimeout) (which resets on each data write and catches stalled transfers), this is a hard limit on the total time a request can take.
Returns 504 Gateway Timeout when exceeded. This protects the gateway from edge cases and slow client attacks.
Default: `1h`
Type: `optionalDuration`
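Example (a sketch; pick a duration that fits your deployment):
```console
$ ipfs config Gateway.MaxRequestDuration 2h
```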
### `Gateway.MaxRangeRequestFileSize`
Maximum file size for HTTP range requests on deserialized responses. Range requests for files larger than this limit return 501 Not Implemented.
@ -1291,6 +1338,11 @@ Examples:
- `*.example.com` will match requests to `http://foo.example.com/ipfs/*` or `http://{cid}.ipfs.bar.example.com/*`.
- `foo-*.example.com` will match requests to `http://foo-bar.example.com/ipfs/*` or `http://{cid}.ipfs.foo-xyz.example.com/*`.
> [!IMPORTANT]
> **Reverse Proxy:** If running behind nginx or another reverse proxy, ensure
> `Host` and `X-Forwarded-*` headers are forwarded correctly.
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) in gateway documentation.
#### `Gateway.PublicGateways: Paths`
An array of paths that should be exposed on the hostname.
@ -1357,6 +1409,9 @@ Default: `false`
Type: `bool`
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
#### `Gateway.PublicGateways: NoDNSLink`
A boolean to configure whether DNSLink for hostname present in `Host`
@ -1367,6 +1422,9 @@ Default: `false` (DNSLink lookup enabled by default for every defined hostname)
Type: `bool`
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
#### `Gateway.PublicGateways: InlineDNSLink`
An optional flag to explicitly configure whether subdomain gateway's redirects
@ -1434,6 +1492,9 @@ ipfs config --json Gateway.PublicGateways '{"localhost": null }'
Below is a list of the most common gateway setups.
> [!IMPORTANT]
> See [Reverse Proxy Caveats](gateway.md#reverse-proxy) if running behind nginx or another reverse proxy.
- Public [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://{cid}.ipfs.dweb.link` (each content root gets its own Origin)
```console
@ -1769,7 +1830,7 @@ Type: `optionalDuration`
### `Ipns.UsePubsub`
Enables IPFS over pubsub experiment for publishing IPNS records in real time.
Enables [IPNS over PubSub](https://specs.ipfs.tech/ipns/ipns-pubsub-router/) for publishing and resolving IPNS records in real time.
**EXPERIMENTAL:** read about current limitations at [experimental-features.md#ipns-pubsub](./experimental-features.md#ipns-pubsub).
@ -2218,6 +2279,9 @@ You can compare the effectiveness of sweep mode vs legacy mode by monitoring the
> [!NOTE]
> This is the default provider system as of Kubo v0.39. To use the legacy provider instead, set `Provide.DHT.SweepEnabled=false`.
> [!NOTE]
> When DHT routing is unavailable (e.g., `Routing.Type=custom` with only HTTP routers), the provider automatically falls back to the legacy provider regardless of this setting.
Default: `true`
Type: `flag`
@ -2384,16 +2448,56 @@ Replaced with [`Provide.DHT.MaxWorkers`](#providedhtmaxworkers).
## `Pubsub`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
Pubsub configures Kubo's opt-in, opinionated [libp2p pubsub](https://web.archive.org/web/20260116065034/https://docs.libp2p.io/concepts/pubsub/overview/) instance.
To enable, set `Pubsub.Enabled` to `true`.
Pubsub configures the `ipfs pubsub` subsystem. To use, it must be enabled by
passing the `--enable-pubsub-experiment` flag to the daemon
or via the `Pubsub.Enabled` flag below.
**EXPERIMENTAL:** This is an opt-in feature. Its primary use case is
[IPNS over PubSub](https://specs.ipfs.tech/ipns/ipns-pubsub-router/), which
enables real-time IPNS record propagation. See [`Ipns.UsePubsub`](#ipnsusepubsub)
for details.
The `ipfs pubsub` commands can also be used for basic publish/subscribe
operations, but only if Kubo's built-in message validation (described below) is
acceptable for your use case.
### When to use a dedicated pubsub node
Kubo's pubsub is optimized for IPNS. It uses opinionated message validation
that may not fit all applications. If you need custom Message ID computation,
different deduplication logic, or validation rules beyond what Kubo provides,
consider building a dedicated pubsub node using
[go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub) directly.
### Message deduplication
Kubo uses two layers of message deduplication to handle duplicate messages that
may arrive via different network paths:
**Layer 1: In-memory TimeCache (Message ID)**
When a message arrives, Kubo computes its Message ID (hash of the message
content) and checks an in-memory cache. If the ID was seen recently, the
message is dropped. This cache is controlled by:
- [`Pubsub.SeenMessagesTTL`](#pubsubseenmessagesttl) - how long Message IDs are remembered (default: 120s)
- [`Pubsub.SeenMessagesStrategy`](#pubsubseenmessagesstrategy) - whether TTL resets on each sighting
This cache is fast but limited: it only works within the TTL window and is
cleared on node restart.
**Layer 2: Persistent Seqno Validator (per-peer)**
For stronger deduplication, Kubo tracks the maximum sequence number seen from
each peer and persists it to the datastore. Messages with sequence numbers
lower than the recorded maximum are rejected. This prevents replay attacks and
handles message cycles in large networks where messages may take longer than
the TimeCache TTL to propagate.
This layer survives node restarts. The state can be inspected or cleared using
`ipfs pubsub reset` (for testing/recovery only).
### `Pubsub.Enabled`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
Enables the pubsub system.
Default: `false`
@ -2402,8 +2506,6 @@ Type: `flag`
### `Pubsub.Router`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
Sets the default router used by pubsub to route messages to peers. This can be one of:
- `"floodsub"` - floodsub is a basic router that simply _floods_ messages to all
@ -2419,10 +2521,9 @@ Type: `string` (one of `"floodsub"`, `"gossipsub"`, or `""` (apply default))
### `Pubsub.DisableSigning`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
Disables message signing and signature verification.
Disables message signing and signature verification. Enable this option if
you're operating in a completely trusted network.
**FOR TESTING ONLY - DO NOT USE IN PRODUCTION**
It is _not_ safe to disable signing even if you don't care _who_ sent the
message because spoofed messages can be used to silence real messages by
@ -2434,20 +2535,12 @@ Type: `bool`
### `Pubsub.SeenMessagesTTL`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
Controls the time window for the in-memory Message ID cache (Layer 1
deduplication). Messages with the same ID seen within this window are dropped.
Controls the time window within which duplicate messages, identified by Message
ID, will be identified and won't be emitted again.
A smaller value for this parameter means that Pubsub messages in the cache will
be garbage collected sooner, which can result in a smaller cache. At the same
time, if there are slower nodes in the network that forward older messages,
this can cause more duplicates to be propagated through the network.
Conversely, a larger value for this parameter means that Pubsub messages in the
cache will be garbage collected later, which can result in a larger cache for
the same traffic pattern. However, it is less likely that duplicates will be
propagated through the network.
A smaller value reduces memory usage but may cause more duplicates in networks
with slow nodes. A larger value uses more memory but provides better duplicate
detection within the time window.
Default: see `TimeCacheDuration` from [go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub)
@ -2455,24 +2548,12 @@ Type: `optionalDuration`
### `Pubsub.SeenMessagesStrategy`
**DEPRECATED**: See [#9717](https://github.com/ipfs/kubo/issues/9717)
Determines how the TTL countdown for the Message ID cache works.
Determines how the time-to-live (TTL) countdown for deduplicating Pubsub
messages is calculated.
The Pubsub seen messages cache is a LRU cache that keeps messages for up to a
specified time duration. After this duration has elapsed, expired messages will
be purged from the cache.
The `last-seen` cache is a sliding-window cache. Every time a message is seen
again with the SeenMessagesTTL duration, its timestamp slides forward. This
keeps frequently occurring messages cached and prevents them from being
continually propagated, especially because of issues that might increase the
number of duplicate messages in the network.
The `first-seen` cache will store new messages and purge them after the
SeenMessagesTTL duration, even if they are seen multiple times within this
duration.
- `last-seen` - Sliding window: TTL resets each time the message is seen again.
Keeps frequently-seen messages in cache longer, preventing continued propagation.
- `first-seen` - Fixed window: TTL counts from first sighting only. Messages are
purged after the TTL regardless of how many times they're seen.
Default: `last-seen` (see [go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub))
@ -2565,58 +2646,70 @@ Contains options for content, peer, and IPNS routing mechanisms.
### `Routing.Type`
There are multiple routing options: "auto", "autoclient", "none", "dht", "dhtclient", "delegated", and "custom".
Controls how your node discovers content and peers on the network.
- **DEFAULT:** If unset, or set to "auto", your node will use the public IPFS DHT (aka "Amino")
and parallel [`Routing.DelegatedRouters`](#routingdelegatedrouters) for additional speed.
**Production options:**
- If set to "autoclient", your node will behave as in "auto" but without running a DHT server.
- **`auto`** (default): Uses both the public IPFS DHT (Amino) and HTTP routers
from [`Routing.DelegatedRouters`](#routingdelegatedrouters) for faster lookups.
Your node starts as a DHT client and automatically switches to server mode
when reachable from the public internet.
- If set to "none", your node will use _no_ routing system. You'll have to
explicitly connect to peers that have the content you're looking for.
- **`autoclient`**: Same as `auto`, but never runs a DHT server.
Use this if your node is behind a firewall or NAT.
- If set to "dht" (or "dhtclient"/"dhtserver"), your node will ONLY use the Amino DHT (no HTTP routers).
- **`dht`**: Uses only the Amino DHT (no HTTP routers). Automatically switches
between client and server mode based on reachability.
- If set to "custom", all default routers are disabled, and only ones defined in `Routing.Routers` will be used.
- **`dhtclient`**: DHT-only, always running as a client. Lower resource usage.
When the DHT is enabled, it can operate in two modes: client and server.
- **`dhtserver`**: DHT-only, always running as a server.
Only use this if your node is reachable from the public internet.
- In server mode, your node will query other peers for DHT records, and will
respond to requests from other peers (both requests to store records and
requests to retrieve records).
- **`none`**: Disables all routing. You must manually connect to peers.
- In client mode, your node will query the DHT as a client but will not respond
to requests from other peers. This mode is less resource-intensive than server
mode.
**About DHT client vs server mode:**
When the DHT is enabled, your node can operate as either a client or server.
In server mode, it queries other peers and responds to their queries - this helps
the network but uses more resources. In client mode, it only queries others without
responding, which is less resource-intensive. With `auto` or `dht`, your node starts
as a client and switches to server when it detects public reachability.
When `Routing.Type` is set to `auto` or `dht`, your node will start as a DHT client, and
switch to a DHT server when and if it determines that it's reachable from the
public internet (e.g., it's not behind a firewall).
> [!CAUTION]
> **`Routing.Type` Experimental options:**
>
> These modes are for research and testing only, not production use.
> They may change without notice between releases.
>
> - **`delegated`**: Uses only HTTP routers from [`Routing.DelegatedRouters`](#routingdelegatedrouters)
> and IPNS publishers from [`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers),
> without initializing the DHT. Useful when peer-to-peer connectivity is unavailable.
> Note: cannot provide content to the network (no DHT means no provider records).
>
> - **`custom`**: Disables all default routers. You define your own routing in
> [`Routing.Routers`](#routingrouters). See [delegated-routing.md](delegated-routing.md).
To force a specific Amino DHT-only mode, client or server, set `Routing.Type` to
`dhtclient` or `dhtserver` respectively. Please do not set this to `dhtserver`
unless you're sure your node is reachable from the public network.
When `Routing.Type` is set to `auto` or `autoclient` your node will accelerate some types of routing
by leveraging [`Routing.DelegatedRouters`](#routingdelegatedrouters) HTTP endpoints compatible with [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/)
introduced in [IPIP-337](https://github.com/ipfs/specs/pull/337)
in addition to the Amino DHT.
When `Routing.Type` is set to `delegated`, your node will use **only** HTTP delegated routers and IPNS publishers,
without initializing the Amino DHT at all. This mode is useful for environments where peer-to-peer DHT connectivity
is not available or desired, while still enabling content routing and IPNS publishing via HTTP APIs.
This mode requires configuring [`Routing.DelegatedRouters`](#routingdelegatedrouters) for content routing and
[`Ipns.DelegatedPublishers`](#ipnsdelegatedpublishers) for IPNS publishing.
**Note:** `delegated` mode operates as read-only for content providing - your node cannot announce content to the network
since there is no DHT connectivity. Content providing is automatically disabled when using this routing type.
[Advanced routing rules](https://github.com/ipfs/kubo/blob/master/docs/delegated-routing.md) can be configured in `Routing.Routers` after setting `Routing.Type` to `custom`.
Default: `auto` (DHT + [`Routing.DelegatedRouters`](#routingdelegatedrouters))
Default: `auto`
Type: `optionalString` (`null`/missing means the default)
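Example (a sketch, e.g. for a node behind NAT):
```console
$ ipfs config Routing.Type autoclient
```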
### `Routing.DelegatedRouters`
An array of URL hostnames for delegated routers to be queried in addition to the Amino DHT when `Routing.Type` is set to `auto` (default) or `autoclient`.
These endpoints must support the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/).
The special value `"auto"` uses delegated routers from [AutoConf](#autoconf) when enabled.
You can combine `"auto"` with custom URLs (e.g., `["auto", "https://custom.example.com"]`) to query both the default delegated routers and your own endpoints. The first `"auto"` entry gets substituted with autoconf values, and other URLs are preserved.
> [!TIP]
> Delegated routing allows IPFS implementations to offload tasks like content routing, peer routing, and naming to a separate process or server while also benefiting from HTTP caching.
>
> One can run their own delegated router either by implementing the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) themselves, or by using [Someguy](https://github.com/ipfs/someguy), a turn-key implementation that proxies requests to other routing systems. A public utility instance of Someguy is hosted at [`https://delegated-ipfs.dev`](https://docs.ipfs.tech/concepts/public-utilities/#delegated-routing).
Default: `["auto"]`
Type: `array[string]` (URLs or `"auto"`)
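Example combining the AutoConf defaults with a custom endpoint (a sketch; `https://custom.example.com` is a placeholder):
```console
$ ipfs config --json Routing.DelegatedRouters '["auto", "https://custom.example.com"]'
```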
### `Routing.AcceleratedDHTClient`
This alternative Amino DHT client with a Full-Routing-Table strategy will
@ -2646,27 +2739,21 @@ When it is enabled:
This is critical to maintain to not harm the network.
- The operations `ipfs stats dht` will default to showing information about the accelerated DHT client
**Caveats:**
1. Running the accelerated client likely will result in more resource consumption (connections, RAM, CPU, bandwidth)
- Users that are limited in the number of parallel connections their machines/networks can perform will likely suffer
- The resource usage is not smooth as the client crawls the network in rounds and reproviding is similarly done in rounds
- Users who previously had a lot of content but were unable to advertise it on the network will see an increase in
egress bandwidth as their nodes start to advertise all of their CIDs into the network. If you have lots of data
entering your node that you don't want to advertise, then consider using [Provide Strategies](#providestrategy)
to reduce the number of CIDs that you are reproviding. Similarly, if you are running a node that deals mostly with
short-lived temporary data (e.g. you use a separate node for ingesting data then for storing and serving it) then
you may benefit from using [Strategic Providing](experimental-features.md#strategic-providing) to prevent advertising
of data that you ultimately will not have.
2. Currently, the DHT is not usable for queries for the first 5-10 minutes of operation as the routing table is being
prepared. This means operations like searching the DHT for particular peers or content will not work initially.
- You can see if the DHT has been initially populated by running `ipfs stats dht`
3. Currently, the accelerated DHT client is not compatible with LAN-based DHTs and will not perform operations against
them
4. (⚠️ 0.39 limitation) When used with [`Provide.DHT.SweepEnabled`](#providedhtsweepenabled), the sweep provider may
fail to estimate DHT size during the accelerated client's network crawl, resulting in all CIDs grouped into a
single region. Content still gets reprovided, but without sweep efficiency gains. Consider disabling the
accelerated client when using sweep mode.
> [!CAUTION]
> **`Routing.AcceleratedDHTClient` Caveats:**
>
> 1. Running the accelerated client likely will result in more resource consumption (connections, RAM, CPU, bandwidth)
> - Users that are limited in the number of parallel connections their machines/networks can perform will be most affected
> - The resource usage is not smooth as the client crawls the network in rounds and reproviding is similarly done in rounds
> - Users who previously had a lot of content but were unable to advertise it on the network will see an increase in
> egress bandwidth as their nodes start to advertise all of their CIDs into the network. If you have lots of data
> entering your node that you don't want to advertise, consider using [`Provide.*`](#provide) configuration
> to control which CIDs are reprovided.
> 2. Currently, the DHT is not usable for queries for the first 5-10 minutes of operation as the routing table is being
> prepared. This means operations like searching the DHT for particular peers or content will not work initially.
> - You can see if the DHT has been initially populated by running `ipfs stats dht`
> 3. Currently, the accelerated DHT client is not compatible with LAN-based DHTs and will not perform operations against
> them.
Default: `false`
@ -2700,30 +2787,18 @@ Default: `[]`
Type: `array[string]`
### `Routing.DelegatedRouters`
An array of URL hostnames for delegated routers to be queried in addition to the Amino DHT when `Routing.Type` is set to `auto` (default) or `autoclient`.
These endpoints must support the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/).
The special value `"auto"` uses delegated routers from [AutoConf](#autoconf) when enabled.
> [!TIP]
> Delegated routing allows IPFS implementations to offload tasks like content routing, peer routing, and naming to a separate process or server while also benefiting from HTTP caching.
>
> One can run their own delegated router either by implementing the [Delegated Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) themselves, or by using [Someguy](https://github.com/ipfs/someguy), a turn-key implementation that proxies requests to other routing systems. A public utility instance of Someguy is hosted at [`https://delegated-ipfs.dev`](https://docs.ipfs.tech/concepts/public-utilities/#delegated-routing).
Default: `["auto"]`
Type: `array[string]` (URLs or `"auto"`)
### `Routing.Routers`
Alternative configuration used when `Routing.Type=custom`.
> [!WARNING]
> **EXPERIMENTAL: `Routing.Routers` configuration may change in future release**
> [!CAUTION]
> **EXPERIMENTAL: `Routing.Routers` is for research and testing only, not production use.**
>
> Consider this advanced low-level config: Most users can simply use `Routing.Type=auto` or `autoclient` and set up basic config in user-friendly [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters).
> - The configuration format and behavior may change without notice between releases.
> - Bugs and regressions may not be prioritized.
> - HTTP-only configurations cannot reliably provide content. See [delegated-routing.md](delegated-routing.md#limitations).
>
> Most users should use `Routing.Type=auto` or `autoclient` with [`Routing.DelegatedRouters`](#routingdelegatedrouters).
Allows for replacing the default routing (Amino DHT) with alternative Router
implementations.
@ -2734,9 +2809,9 @@ Default: `{}`
Type: `object[string->object]`
#### `Routing.Routers: Type`
#### `Routing.Routers.[name].Type`
**EXPERIMENTAL: `Routing.Routers` configuration may change in future release**
**⚠️ EXPERIMENTAL: For research and testing only. May change without notice.**
It specifies the routing type that will be created.
@ -2748,9 +2823,9 @@ Currently supported types:
Type: `string`
#### `Routing.Routers: Parameters`
#### `Routing.Routers.[name].Parameters`
**EXPERIMENTAL: `Routing.Routers` configuration may change in future release**
**⚠️ EXPERIMENTAL: For research and testing only. May change without notice.**
Parameters needed to create the specified router. Supported params per router type:
@ -2787,14 +2862,18 @@ Default: `{}` (use the safe implicit defaults)
Type: `object[string->string]`
### `Routing: Methods`
### `Routing.Methods`
`Methods:map` will define which routers will be executed per method used when `Routing.Type=custom`.
> [!WARNING]
> **EXPERIMENTAL: `Routing.Routers` configuration may change in future release**
> [!CAUTION]
> **EXPERIMENTAL: `Routing.Methods` is for research and testing only, not production use.**
>
> Consider this advanced low-level config: Most users can simply use `Routing.Type=auto` or `autoclient` and set up basic config in user-friendly [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters).
> - The configuration format and behavior may change without notice between releases.
> - Bugs and regressions may not be prioritized.
> - HTTP-only configurations cannot reliably provide content. See [delegated-routing.md](delegated-routing.md#limitations).
>
> Most users should use `Routing.Type=auto` or `autoclient` with [`Routing.DelegatedRouters`](#routingdelegatedrouters).
The key will be the name of the method: `"provide"`, `"find-providers"`, `"find-peers"`, `"put-ipns"`, `"get-ipns"`. All methods must be added to the list.
@ -3303,7 +3382,7 @@ NATs.
See also:
- Docs: [Libp2p Circuit Relay](https://docs.libp2p.io/concepts/circuit-relay/)
- Docs: [Libp2p Circuit Relay](https://web.archive.org/web/20260128152445/https://docs.libp2p.io/concepts/nat/circuit-relay/)
- [`Swarm.RelayClient.Enabled`](#swarmrelayclientenabled) for getting a public
- `/p2p-circuit` address when behind a firewall.
- [`Swarm.EnableHolePunching`](#swarmenableholepunching) for direct connection upgrade through relay
@ -3351,7 +3430,7 @@ is a transport protocol that provides another way for browsers to
connect to the rest of the libp2p network. WebRTC Direct allows for browser
nodes to connect to other nodes without special configuration, such as TLS
certificates. This can be useful for browser nodes that do not yet support
[WebTransport](https://blog.libp2p.io/2022-12-19-libp2p-webtransport/),
[WebTransport](https://web.archive.org/web/20260107053250/https://blog.libp2p.io/2022-12-19-libp2p-webtransport/),
which is still relatively new and has [known issues](https://github.com/libp2p/js-libp2p/issues/2572).
Enabling this transport allows Kubo node to act on `/udp/4001/webrtc-direct`
@ -3451,7 +3530,7 @@ Please remove this option from your config.
## `DNS`
Options for configuring DNS resolution for [DNSLink](https://docs.ipfs.tech/concepts/dnslink/) and `/dns*` [Multiaddrs][libp2p-multiaddrs].
Options for configuring DNS resolution for [DNSLink](https://docs.ipfs.tech/concepts/dnslink/) and `/dns*` [Multiaddrs][libp2p-multiaddrs] (including peer addresses discovered via DHT or delegated routing).
### `DNS.Resolvers`
@ -3481,6 +3560,7 @@ Be mindful that:
- The default catch-all resolver is the cleartext one provided by your operating system. It can be overridden by adding a DoH entry for the DNS root indicated by `.` as illustrated above.
- Out-of-the-box support for selected non-ICANN TLDs relies on third-party centralized services provided by respective communities on best-effort basis.
- The special value `"auto"` uses DNS resolvers from [AutoConf](#autoconf) when enabled. For example: `{".": "auto"}` uses any custom DoH resolver (global or per TLD) provided by AutoConf system.
- When [`AutoTLS.SkipDNSLookup`](#autotlsskipdnslookup) is enabled (default), domains matching [`AutoTLS.DomainSuffix`](#autotlsdomainsuffix) (default: `libp2p.direct`) are resolved locally by parsing the IP directly from the hostname. Set `AutoTLS.SkipDNSLookup=false` to force network DNS lookups for these domains.
Default: `{".": "auto"}`
@ -3601,9 +3681,11 @@ Type: `flag`
## `Import`
Options to configure the default options used for ingesting data, in commands such as `ipfs add` or `ipfs block put`. All affected commands are detailed per option.
Options to configure the default parameters used for ingesting data, in commands such as `ipfs add` or `ipfs block put`. All affected commands are detailed per option.
Note that using flags will override the options defined here.
These options implement [IPIP-499: UnixFS CID Profiles](https://github.com/ipfs/specs/pull/499) for reproducible CID generation across IPFS implementations. Instead of configuring individual options, you can apply a predefined profile with `ipfs config profile apply <profile-name>`. See [Profiles](#profiles) for available options like `unixfs-v1-2025`.
Note that using CLI flags will override the options defined here.
### `Import.CidVersion`
@ -3783,6 +3865,42 @@ Default: `256KiB` (may change, inspect `DefaultUnixFSHAMTDirectorySizeThreshold`
Type: [`optionalBytes`](#optionalbytes)
### `Import.UnixFSHAMTDirectorySizeEstimation`
Controls how directory size is estimated when deciding whether to switch
from a basic UnixFS directory to HAMT sharding.
Accepted values:
- `links` (default): Legacy estimation using sum of link names and CID byte lengths.
- `block`: Full serialized dag-pb block size for accurate threshold decisions.
- `disabled`: Disable HAMT sharding entirely (directories always remain basic).
The `block` estimation is recommended for new profiles as it provides more
accurate threshold decisions and better cross-implementation consistency.
See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details.
Commands affected: `ipfs add`
Default: `links`
Type: `optionalString`
### `Import.UnixFSDAGLayout`
Controls the DAG layout used when chunking files.
Accepted values:
- `balanced` (default): Balanced DAG layout with uniform leaf depth.
- `trickle`: Trickle DAG layout optimized for streaming.
Commands affected: `ipfs add`
Default: `balanced`
Type: `optionalString`
## `Version`
Options to configure agent version announced to the swarm, and leveraging
@ -3826,7 +3944,7 @@ applied with the `--profile` flag to `ipfs init` or with the `ipfs config profil
apply` command. When a profile is applied a backup of the configuration file
will be created in `$IPFS_PATH`.
Configuration profiles can be applied additively. For example, both the `test-cid-v1` and `lowpower` profiles can be applied one after the other.
Configuration profiles can be applied additively. For example, both the `unixfs-v1-2025` and `lowpower` profiles can be applied one after the other.
The available configuration profiles are listed below. You can also find them
documented in `ipfs config profile --help`.
@ -3983,42 +4101,35 @@ Disables [Provide](#provide) system (and announcing to Amino DHT).
(Re-)enables [Provide](#provide) system (reverts [`announce-off` profile](#announce-off-profile)).
### `unixfs-v0-2015` profile
Legacy UnixFS import profile for backward-compatible CID generation.
Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and
link-based HAMT size estimation.
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> Use only when legacy CIDs are required. For new projects, use [`unixfs-v1-2025`](#unixfs-v1-2025-profile).
>
> See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details.
### `legacy-cid-v0` profile
Makes UnixFS import (`ipfs add`) produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.
Alias for [`unixfs-v0-2015`](#unixfs-v0-2015-profile) profile.
### `unixfs-v1-2025` profile
Recommended UnixFS import profile for cross-implementation CID determinism.
Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node,
256 HAMT fanout, and block-based size estimation for HAMT threshold.
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> This profile is provided for legacy users and should not be used for new projects.
### `test-cid-v1` profile
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT
above 256KiB).
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> [`Import.*`](#import) settings applied by this profile MAY change in future release. Provided for testing purposes.
> This profile ensures CID consistency across different IPFS implementations.
>
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
### `test-cid-v1-wide` profile
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
and 1 MiB chunks and wider file DAGs (max 1024 links per every node type,
switch dir to HAMT above 1MiB).
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> [`Import.*`](#import) settings applied by this profile MAY change in future release. Provided for testing purposes.
>
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
> See [IPIP-499](https://github.com/ipfs/specs/pull/499) for more details.
## Security

View File

@ -45,7 +45,7 @@ This gives a more Go-centric dependency updating flow to building a new binary w
## Bespoke Extension Points
Certain Kubo functionality may have their own extension points. For example:
* Kubo supports the [Routing v1](https://github.com/ipfs/specs/blob/main/routing/ROUTING_V1_HTTP.md) API for delegating content routing to external processes
* Kubo supports the [Routing v1](https://specs.ipfs.tech/routing/http-routing-v1/) API for delegating content routing to external processes
* Kubo supports the [Pinning Service API](https://github.com/ipfs/pinning-services-api-spec) for delegating pinning to external processes
* Kubo supports [DNSLink](https://dnslink.dev/) for delegating name->CID mappings to DNS

View File

@ -1,4 +1,15 @@
# New multi-router configuration system
# Delegated Routing Notes
- Status Date: 2025-12
> [!IMPORTANT]
> Most users are best served by setting delegated HTTP router URLs in [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters) and `Routing.Type` to `auto` or `autoclient`, rather than using custom routing with `Routing.Routers` and `Routing.Methods` directly.
>
> The rest of this documentation describes experimental features intended only for researchers and advanced users.
----
# Custom Multi-Router Configuration (Experimental)
- Start Date: 2022-08-15
- Related Issues:
@ -6,27 +17,16 @@
- https://github.com/ipfs/kubo/issues/9079
- https://github.com/ipfs/kubo/pull/9877
## Summary
Previously we only used the Amino DHT for content routing and content
providing.
Kubo 0.14 introduced experimental support for [delegated routing](https://github.com/ipfs/kubo/pull/8997),
which then got changed and standardized as [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/).
Kubo 0.23.0 release added support for [self-hosting Routing V1 HTTP API server](https://github.com/ipfs/kubo/blob/master/docs/changelogs/v0.23.md#self-hosting-routingv1-endpoint-for-delegated-routing-needs).
> [!TIP]
> Kubo 0.35 added support for [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters).
> [!CAUTION]
> **`Routing.Type=custom` with `Routing.Routers` and `Routing.Methods` is EXPERIMENTAL.**
>
> Most users are best served by setting delegated HTTP router URLs there and `Routing.Type` to `auto` or `autoclient`, rather than using custom routing with complex `Routing.Routers` and `Routing.Methods` directly.
> This feature is provided for **research and testing purposes only**. It is **not suitable for production use**.
>
> The rest of this documentation is intended only for advanced users and researchers.
Now we need a better way to add routers that use different protocols, such as
[Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) or the Amino
DHT, and to configure them (and future routing systems) to cover different use cases.
> - The configuration format and behavior may change without notice between Kubo releases.
> - Bugs and regressions affecting custom routing may not be prioritized or fixed promptly.
> - HTTP-only routing configurations (without DHT) cannot reliably provide content to the network (👉️ see [Limitations](#limitations) below).
>
> **For production deployments**, use `Routing.Type=auto` (default) or `Routing.Type=autoclient` with [`Routing.DelegatedRouters`](https://github.com/ipfs/kubo/blob/master/docs/config.md#routingdelegatedrouters).
## Motivation
@ -362,6 +362,29 @@ I got ideas from all of the following links to create this design document:
- https://www.notion.so/pl-strflt/Delegated-Routing-Thoughts-very-very-WIP-0543bc51b1bd4d63a061b0f28e195d38
- https://gist.github.com/guseggert/effa027ff4cbadd7f67598efb6704d12
### Limitations
#### HTTP-only routing cannot reliably provide content
Configurations that use only HTTP routers (without any DHT router) are unable to reliably announce content (provider records) to the network.
This limitation exists because:
1. **No standardized HTTP API for providing**: The [Routing V1 HTTP API](https://specs.ipfs.tech/routing/http-routing-v1/) spec only defines read operations (`GET /routing/v1/providers/{cid}`). The write operation (`PUT /routing/v1/providers`) was never standardized.
2. **Legacy experimental API**: The only available HTTP providing mechanism is an undocumented `PUT /routing/v1/providers` request format called `ProvideBitswap`, which is a historical experiment. See [IPIP-526](https://github.com/ipfs/specs/pull/526) for ongoing discussion about formalizing HTTP-based provider announcements.
3. **Provider system integration**: Kubo's default provider system (`Provide.DHT.SweepEnabled=true` since v0.38) is designed for DHT-based providing. When no DHT is configured, the provider system may silently skip HTTP routers or behave unexpectedly.
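For contrast, the standardized read side can be exercised against any Routing V1 endpoint with a plain HTTP request (a sketch; the endpoint URL is only an example and `<cid>` is a placeholder):
```bash
# Look up providers for a CID via the read-only Routing V1 API
curl -s -H "Accept: application/json" \
  "https://delegated-ipfs.dev/routing/v1/providers/<cid>"
```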
**Workarounds for testing:**
If you need to test HTTP providing, you can try:
- Setting `Provide.DHT.SweepEnabled=false` to use the legacy provider system
- Including at least one DHT router in your custom configuration alongside HTTP routers
These workarounds are not guaranteed to work across Kubo versions and should not be relied upon for production use.
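As a sketch of the first workaround (testing only, given the caveats above), the legacy provider system can be selected via config:
```bash
# Fall back to the legacy provider system so provide calls are not
# restricted to the DHT sweep provider (experimental, may change between releases)
ipfs config --json Provide.DHT.SweepEnabled false
```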
### Copyright
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).

316
docs/developer-guide.md Normal file
View File

@ -0,0 +1,316 @@
# Developer Guide
By the end of this guide, you will be able to:
- Build Kubo from source
- Run the test suites
- Make and verify code changes
This guide covers the local development workflow. For user documentation, see [docs.ipfs.tech](https://docs.ipfs.tech/).
## Table of Contents
- [Prerequisites](#prerequisites)
- [Quick Start](#quick-start)
- [Building](#building)
- [Running Tests](#running-tests)
- [Running the Linter](#running-the-linter)
- [Common Development Tasks](#common-development-tasks)
- [Code Organization](#code-organization)
- [Architecture](#architecture)
- [Troubleshooting](#troubleshooting)
- [Development Dependencies](#development-dependencies)
- [Further Reading](#further-reading)
## Prerequisites
Before you begin, ensure you have:
- **Go** - see `go.mod` for the minimum required version
- **Git**
- **GNU Make**
- **GCC** (optional) - required for CGO (Go's C interop); without it, build with `CGO_ENABLED=0`
## Quick Start
```bash
git clone https://github.com/ipfs/kubo.git
cd kubo
make build
./cmd/ipfs/ipfs version
```
You should see output like:
```
ipfs version 0.34.0-dev
```
The binary is built to `cmd/ipfs/ipfs`. To install it system-wide:
```bash
make install
```
This installs the binary to `$GOPATH/bin`.
## Building
| Command | Description |
|---------|-------------|
| `make build` | build the `ipfs` binary to `cmd/ipfs/ipfs` |
| `make install` | install to `$GOPATH/bin` |
| `make nofuse` | build without FUSE support |
| `make build CGO_ENABLED=0` | build without CGO (no C compiler needed) |
For Windows-specific instructions, see [windows.md](windows.md).
## Running Tests
Kubo has two types of tests:
- **Unit tests** - test individual packages in isolation. Fast and don't require a running daemon.
- **End-to-end tests** - spawn real `ipfs` nodes, run actual CLI commands, and test the full system. Slower but catch integration issues.
Note that `go test ./...` runs both unit and end-to-end tests. Use `make test` to run all tests. CI runs unit and end-to-end tests in separate jobs for faster feedback.
<!-- TODO: uncomment when https://github.com/ipfs/kubo/pull/11113 is merged
| Command | What it runs |
|---------|--------------|
| `make test_unit` | unit tests only (excludes `test/cli`) |
| `make test_cli` | CLI end-to-end tests only (requires `make build` first) |
| `make test_sharness` | sharness end-to-end tests only |
| `make test` | all tests (unit + CLI + sharness) |
-->
For end-to-end tests, Kubo has two suites:
- **`test/cli`** - modern Go-based test harness that spawns real `ipfs` nodes and runs actual CLI commands. All new tests should be added here.
- **`test/sharness`** - legacy bash-based tests. We are slowly migrating these to `test/cli`.
When modifying tests: cosmetic changes to `test/sharness` are fine, but if significant rewrites are needed, remove the outdated sharness test and add a modern one to `test/cli` instead.
### Before Running Tests
**Environment requirements**: some legacy tests expect the default ports (8080, 5001, 4001) to be free and no other Kubo node advertising itself via mDNS (local network discovery) on the LAN. Tests may fail if you have a local Kubo instance running, so stop any running `ipfs daemon` before running the full test suite.
Two critical setup steps:
1. **Rebuild after code changes**: if you modify any `.go` files outside of `test/`, you must run `make build` before running integration tests.
2. **Set environment variables**: integration tests use the `ipfs` binary from `PATH` and need an isolated `IPFS_PATH`. Run these commands from the repository root:
```bash
export PATH="$PWD/cmd/ipfs:$PATH"
export IPFS_PATH="$(mktemp -d)"
```
### Unit Tests
```bash
go test ./...
```
### CLI Integration Tests (`test/cli`)
These are Go-based integration tests that invoke the `ipfs` CLI.
Instead of running the entire test suite, you can run a specific test to get faster feedback during development.
Run a specific test (recommended during development):
```bash
go test ./test/cli/... -run TestAdd -v
```
Run all CLI tests:
```bash
go test ./test/cli/...
```
Run with verbose output:
```bash
go test ./test/cli/... -v
```
**Common error**: "version (16) is lower than repos (17)" means your `PATH` points to an old binary. Check `which ipfs` and rebuild with `make build`.
### Sharness Tests (`test/sharness`)
Shell-based integration tests using [sharness](https://github.com/chriscool/sharness) (a portable shell testing framework).
```bash
cd test/sharness
```
Run a specific test:
```bash
timeout 60s ./t0080-repo.sh
```
Run with verbose output (this disables automatic cleanup):
```bash
./t0080-repo.sh -v
```
**Cleanup**: the `-v` flag disables automatic cleanup. Before re-running tests, kill any dangling `ipfs daemon` processes:
```bash
pkill -f "ipfs daemon"
```
### Full Test Suite
```bash
make test # run all tests
make test_short # run shorter test suite
```
## Running the Linter
Run the linter using the Makefile target (not `golangci-lint` directly):
```bash
make -O test_go_lint
```
## Common Development Tasks
### Modifying CLI Commands
After editing help text in `core/commands/`, verify the output width:
```bash
go test ./test/cli/... -run TestCommandDocsWidth
```
### Updating Dependencies
Use the Makefile target (not `go mod tidy` directly):
```bash
make mod_tidy
```
### Editing the Changelog
When modifying `docs/changelogs/`:
- update the Table of Contents when adding sections
- add user-facing changes to the Highlights section (the Changelog section is auto-generated)
### Running the Daemon
Always run the daemon with a timeout or shut it down promptly.
With timeout:
```bash
timeout 60s ipfs daemon
```
Or shut down via API:
```bash
ipfs shutdown
```
For multi-step experiments, store the `IPFS_PATH` value in a file so every step and shell uses the same repository, as shown below.
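A minimal sketch (the path under `/tmp` is arbitrary):
```bash
# Create the repo path once and record it...
echo "$(mktemp -d)" > /tmp/kubo-dev-repo-path
# ...then load it at the start of every later step or shell
export IPFS_PATH="$(cat /tmp/kubo-dev-repo-path)"
ipfs init
```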
## Code Organization
| Directory | Description |
|-----------|-------------|
| `cmd/ipfs/` | CLI entry point and binary |
| `core/` | core IPFS node implementation |
| `core/commands/` | CLI command definitions |
| `core/coreapi/` | Go API implementation |
| `client/rpc/` | HTTP RPC client |
| `plugin/` | plugin system |
| `repo/` | repository management |
| `test/cli/` | Go-based CLI integration tests |
| `test/sharness/` | legacy shell-based integration tests |
| `docs/` | documentation |
Key external dependencies:
- [go-libp2p](https://github.com/libp2p/go-libp2p) - networking stack
- [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) - distributed hash table
- [boxo](https://github.com/ipfs/boxo) - IPFS SDK (including Bitswap, the data exchange engine)
For a deep dive into how code flows through Kubo, see [The `Add` command demystified](add-code-flow.md).
## Architecture
**Map of Implemented Subsystems** ([editable source](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit)):
<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&amp;h=1036">
**CLI, HTTP-API, Core Diagram**:
![](./cli-http-api-core-diagram.png)
## Troubleshooting
### "version (N) is lower than repos (M)" Error
This means the `ipfs` binary in your `PATH` is older than expected.
Check which binary is being used:
```bash
which ipfs
```
Rebuild and verify PATH:
```bash
make build
export PATH="$PWD/cmd/ipfs:$PATH"
./cmd/ipfs/ipfs version
```
### FUSE Issues
If you don't need FUSE support, build without it:
```bash
make nofuse
```
Or set the `TEST_FUSE=0` environment variable when running tests.
### Build Fails with "No such file: stdlib.h"
You're missing a C compiler. Either install GCC or build without CGO:
```bash
make build CGO_ENABLED=0
```
## Development Dependencies
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
## Further Reading
- [The `Add` command demystified](add-code-flow.md) - deep dive into code flow
- [Configuration reference](config.md)
- [Performance debugging](debug-guide.md)
- [Experimental features](experimental-features.md)
- [Release process](releases.md)
- [Contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)
## Source Code
The complete source code is at [github.com/ipfs/kubo](https://github.com/ipfs/kubo).

View File

@ -7,9 +7,9 @@ go 1.25
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/boxo v0.35.3-0.20251203042105-52a05615b40f
github.com/ipfs/boxo v0.36.1-0.20260205181019-5060c8dbda85
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.45.0
github.com/libp2p/go-libp2p v0.47.0
github.com/multiformats/go-multiaddr v0.16.1
)
@ -18,7 +18,7 @@ require (
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
github.com/DataDog/zstd v1.5.7 // indirect
github.com/Jorropo/jsync v1.0.1 // indirect
github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 // indirect
github.com/RaduBerinde/axisds v0.1.0 // indirect
github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect
@ -34,9 +34,9 @@ require (
github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble/v2 v2.1.2 // indirect
github.com/cockroachdb/pebble/v2 v2.1.4 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect
github.com/cskr/pubsub v1.0.2 // indirect
@ -44,14 +44,14 @@ require (
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dgraph-io/badger v1.6.2 // indirect
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/filecoin-project/go-clock v0.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.12 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.2.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
@ -79,29 +79,29 @@ require (
github.com/ipfs/go-cidutil v0.1.0 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-ds-badger v0.3.4 // indirect
github.com/ipfs/go-ds-flatfs v0.5.5 // indirect
github.com/ipfs/go-ds-flatfs v0.6.0 // indirect
github.com/ipfs/go-ds-leveldb v0.5.2 // indirect
github.com/ipfs/go-ds-measure v0.2.2 // indirect
github.com/ipfs/go-ds-pebble v0.5.7 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-ds-pebble v0.5.9 // indirect
github.com/ipfs/go-dsqueue v0.1.2 // indirect
github.com/ipfs/go-fs-lock v0.1.1 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.0 // indirect
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260204204540-af9bcbaf5709 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-pq v0.0.4 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
github.com/ipfs/go-ipld-format v0.6.3 // indirect
github.com/ipfs/go-ipld-git v0.1.1 // indirect
github.com/ipfs/go-ipld-legacy v0.2.2 // indirect
github.com/ipfs/go-log/v2 v2.9.0 // indirect
github.com/ipfs/go-log/v2 v2.9.1 // indirect
github.com/ipfs/go-metrics-interface v0.3.0 // indirect
github.com/ipfs/go-peertaskqueue v0.8.2 // indirect
github.com/ipfs/go-peertaskqueue v0.8.3 // indirect
github.com/ipfs/go-test v0.2.3 // indirect
github.com/ipfs/go-unixfsnode v1.10.2 // indirect
github.com/ipld/go-car/v2 v2.16.0 // indirect
github.com/ipld/go-codec-dagpb v1.7.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipshipyard/p2p-forge v0.6.1 // indirect
github.com/ipshipyard/p2p-forge v0.7.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
@ -115,22 +115,22 @@ require (
github.com/libp2p/go-doh-resolver v0.5.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.36.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.37.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.14.2 // indirect
github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
github.com/libp2p/go-libp2p-xor v0.1.0 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.3.0 // indirect
github.com/libp2p/go-netroute v0.4.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mholt/acmez/v3 v3.1.2 // indirect
github.com/miekg/dns v1.1.68 // indirect
github.com/miekg/dns v1.1.72 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect
@ -138,7 +138,7 @@ require (
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-dns v0.5.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.10.0 // indirect
@ -176,9 +176,9 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.55.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.59.0 // indirect
github.com/quic-go/webtransport-go v0.10.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
@ -194,40 +194,40 @@ require (
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/tools v0.41.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gonum.org/v1/gonum v0.16.0 // indirect
gonum.org/v1/gonum v0.17.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/grpc v1.75.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

View File

@ -1,9 +1,7 @@
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510=
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
@ -18,12 +16,7 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
@ -34,8 +27,8 @@ github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwS
github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU=
github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 h1:8XBWWQD+vFF+JqOsm16t0Kab1a7YWV8+GISVEP8AuZ8=
github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y=
github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8=
github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y=
github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk=
github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc=
github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo=
@ -45,16 +38,13 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vS
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM=
github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@ -64,7 +54,6 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU=
github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4=
github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
@ -95,19 +84,18 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk=
github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4=
@ -135,6 +123,8 @@ github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkz
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54=
github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@ -148,21 +138,18 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw=
github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU=
@ -171,10 +158,7 @@ github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@ -188,7 +172,6 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@ -202,7 +185,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@ -239,8 +221,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@ -253,8 +233,6 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -264,8 +242,6 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw=
@ -291,8 +267,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.3-0.20251203042105-52a05615b40f h1:QaTdH/7Bi3wPYtwRs9b7cKguT2h2al1ZLYE+ziFYqEg=
github.com/ipfs/boxo v0.35.3-0.20251203042105-52a05615b40f/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/boxo v0.36.1-0.20260205181019-5060c8dbda85 h1:zBxLtBiJxVTdntQZUogxjjy4FIjMnKdWGXSTR104AnY=
github.com/ipfs/boxo v0.36.1-0.20260205181019-5060c8dbda85/go.mod h1:92hnRXfP5ScKEIqlq9Ns7LR1dFXEVADKWVGH0fjk83k=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
@ -314,28 +290,28 @@ github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46U
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo=
github.com/ipfs/go-ds-badger v0.3.4/go.mod h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8=
github.com/ipfs/go-ds-flatfs v0.5.5 h1:lkx5C99pFBMI7T1sYF7y3v7xIYekNVNMp/95Gm9Y3tY=
github.com/ipfs/go-ds-flatfs v0.5.5/go.mod h1:bM7+m7KFUyv5dp3RBKTr3+OHgZ6h8ydCQkO7tjeO9Z4=
github.com/ipfs/go-ds-flatfs v0.6.0 h1:olAEnDNBK1VMoTRZvfzgo90H5kBP4qIZPpYMtNlBBws=
github.com/ipfs/go-ds-flatfs v0.6.0/go.mod h1:p8a/YhmAFYyuonxDbvuIANlDCgS69uqVv+iH5f8fAxY=
github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp0x0=
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI=
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.7 h1:4PQI46y3fjjxUTgHwYqcOVyoxiU6v1sqN6ONeRXGQTM=
github.com/ipfs/go-ds-pebble v0.5.7/go.mod h1:rsIgXE2qN+VfHKBin2cOOGFTZ/Agor6i8wBWA6ihbr0=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-ds-pebble v0.5.9 h1:D1FEuMxjbEmDADNqsyT74n9QHVAn12nv9i9Qa15AFYc=
github.com/ipfs/go-ds-pebble v0.5.9/go.mod h1:XmUBN05l6B+tMg7mpMS75ZcKW/CX01uZMhhWw85imQA=
github.com/ipfs/go-dsqueue v0.1.2 h1:jBMsgvT9Pj9l3cqI0m5jYpW/aWDYkW4Us6EuzrcSGbs=
github.com/ipfs/go-dsqueue v0.1.2/go.mod h1:OU94YuMVUIF/ctR7Ysov9PI4gOa2XjPGN9nd8imSv78=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260204204540-af9bcbaf5709 h1:0JiurWPnR7ZtjYW8XdfThOcOU5WlVVGQ1JY4FHHgyu8=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260204204540-af9bcbaf5709/go.mod h1:yZeTCte5zTH66bbEpLPkSog3/ImppCD00DMP7NjYmys=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw=
github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo=
github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
github.com/ipfs/go-ipfs-pq v0.0.4 h1:U7jjENWJd1jhcrR8X/xHTaph14PTAK9O+yaLJbjqgOw=
github.com/ipfs/go-ipfs-pq v0.0.4/go.mod h1:9UdLOIIb99IFrgT0Fc53pvbvlJBhpUb4GJuAQf3+O2A=
github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk=
github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
@ -349,12 +325,12 @@ github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYD
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk=
github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk=
github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo=
github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU=
github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY=
github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU=
github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
github.com/ipfs/go-peertaskqueue v0.8.3 h1:tBPpGJy+A92RqtRFq5amJn0Uuj8Pw8tXi0X3eHfHM8w=
github.com/ipfs/go-peertaskqueue v0.8.3/go.mod h1:OqVync4kPOcXEGdj/LKvox9DCB5mkSBeXsPczCxLtYA=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8=
@ -368,8 +344,8 @@ github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH
github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714 h1:cqNk8PEwHnK0vqWln+U/YZhQc9h2NB3KjUjDPZo5Q2s=
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714/go.mod h1:ZEUdra3CoqRVRYgAX/jAJO9aZGz6SKtKEG628fHHktY=
github.com/ipshipyard/p2p-forge v0.6.1 h1:987/hUC1YxI56CcMX6iTB+9BLjFV0d2SJnig9Z1pf8A=
github.com/ipshipyard/p2p-forge v0.6.1/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U=
github.com/ipshipyard/p2p-forge v0.7.0 h1:PQayexxZC1FR2Vx0XOSbmZ6wDPliidS48I+xXWuF+YU=
github.com/ipshipyard/p2p-forge v0.7.0/go.mod h1:i2wg0p7WmHGyo5vYaK9COZBp8BN5Drncfu3WoQNZlQY=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
@ -377,11 +353,9 @@ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABo
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
@ -406,7 +380,6 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@ -424,20 +397,20 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.45.0 h1:Pdhr2HsFXaYjtfiNcBP4CcRUONvbMFdH3puM9vV4Tiw=
github.com/libp2p/go-libp2p v0.45.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc=
github.com/libp2p/go-libp2p v0.47.0/go.mod h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8=
github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ=
github.com/libp2p/go-libp2p-kad-dht v0.37.1 h1:jtX8bQIXVCs6/allskNB4m5n95Xvwav7wHAhopGZfS0=
github.com/libp2p/go-libp2p-kad-dht v0.37.1/go.mod h1:Uwokdh232k9Y1uMy2yJOK5zb7hpMHn4P8uWS4s9i05Q=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o=
github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4=
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s=
github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE=
github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
@ -451,8 +424,8 @@ github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQ
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
@ -461,12 +434,10 @@ github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfY
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY=
github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -475,14 +446,12 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc=
github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@ -500,8 +469,6 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
@ -518,8 +485,8 @@ github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw=
github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-dns v0.5.0 h1:p/FTyHKX0nl59f+S+dEUe8HRK+i5Ow/QHMw8Nh3gPCo=
github.com/multiformats/go-multiaddr-dns v0.5.0/go.mod h1:yJ349b8TPIAANUyuOzn1oz9o22tV9f+06L+cCeMxC14=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
@ -546,8 +513,6 @@ github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOo
github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@ -568,7 +533,6 @@ github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
@ -632,25 +596,21 @@ github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo=
github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI=
github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@ -658,30 +618,7 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/slok/go-http-metrics v0.13.0 h1:lQDyJJx9wKhmbliyUsZ2l6peGnXRHjsjoqPt5VYzcP8=
github.com/slok/go-http-metrics v0.13.0/go.mod h1:HIr7t/HbN2sJaunvnt9wKP9xoBBVZFo1/KiHU3b0w+4=
@ -692,8 +629,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
@ -727,15 +662,12 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ=
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE=
github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s=
github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y=
@ -769,7 +701,6 @@ github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
@ -781,8 +712,8 @@ go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
@ -793,14 +724,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
@ -813,23 +744,19 @@ go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -842,8 +769,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -852,11 +779,10 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -876,18 +802,15 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -913,16 +836,13 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -933,18 +853,16 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -981,10 +899,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo=
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1004,17 +922,14 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1046,8 +961,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1055,11 +970,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@ -1069,17 +981,11 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -1097,9 +1003,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -1122,8 +1025,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1131,7 +1034,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8=
gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@ -1145,8 +1047,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -1159,5 +1059,3 @@ pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"sync"
"time"
"github.com/ipfs/boxo/files"
"github.com/ipfs/boxo/path"
@ -58,6 +59,28 @@ func createTempRepo() (string, error) {
return "", err
}
// Use TCP-only on loopback with random port for reliable local testing.
// This matches what kubo's test harness uses (test/cli/transports_test.go).
// QUIC/UDP transports are avoided because they may be throttled on CI.
cfg.Addresses.Swarm = []string{
"/ip4/127.0.0.1/tcp/0",
}
// Explicitly disable non-TCP transports for reliability.
cfg.Swarm.Transports.Network.QUIC = config.False
cfg.Swarm.Transports.Network.Relay = config.False
cfg.Swarm.Transports.Network.WebTransport = config.False
cfg.Swarm.Transports.Network.WebRTCDirect = config.False
cfg.Swarm.Transports.Network.Websocket = config.False
cfg.AutoTLS.Enabled = config.False
// Disable routing - we don't need DHT for direct peer connections.
// Bitswap works with directly connected peers without needing DHT lookups.
cfg.Routing.Type = config.NewOptionalString("none")
// Disable bootstrap for this example - we manually connect only the peers we need.
cfg.Bootstrap = []string{}
// When creating the repository, you can define custom settings on the repository, such as enabling experimental
// features (See experimental-features.md) or customizing the gateway endpoint.
// To do such things, you should modify the variable `cfg`. For example:
@ -96,10 +119,14 @@ func createNode(ctx context.Context, repoPath string) (*core.IpfsNode, error) {
// Construct the node
nodeOptions := &core.BuildCfg{
Online: true,
Routing: libp2p.DHTOption, // This option sets the node to be a full DHT node (both fetching and storing DHT Records)
// Routing: libp2p.DHTClientOption, // This option sets the node to be a client DHT node (only fetching records)
Repo: repo,
Online: true,
// For this example, we use NilRouterOption (no routing) since we connect peers directly.
// Bitswap works with directly connected peers without needing DHT lookups.
// In production, you would typically use:
// Routing: libp2p.DHTOption, // Full DHT node (stores and fetches records)
// Routing: libp2p.DHTClientOption, // DHT client (only fetches records)
Routing: libp2p.NilRouterOption,
Repo: repo,
}
return core.NewNode(ctx, nodeOptions)
@ -192,7 +219,7 @@ func main() {
fmt.Println("-- Getting an IPFS node running -- ")
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
// Spawn a local peer using a temporary path, for testing purposes
@ -284,41 +311,31 @@ func main() {
fmt.Printf("Got directory back from IPFS (IPFS path: %s) and wrote it to %s\n", cidDirectory.String(), outputPathDirectory)
/// --- Part IV: Getting a file from the IPFS Network
/// --- Part IV: Getting a file from another IPFS node
fmt.Println("\n-- Going to connect to a few nodes in the Network as bootstrappers --")
fmt.Println("\n-- Connecting to nodeA and fetching content via bitswap --")
peerMa := fmt.Sprintf("/ip4/127.0.0.1/udp/4010/p2p/%s", nodeA.Identity.String())
// Get nodeA's actual listening address dynamically.
// We configured TCP-only on 127.0.0.1 with random port, so this will be a TCP address.
peerAddrs, err := ipfsA.Swarm().LocalAddrs(ctx)
if err != nil {
panic(fmt.Errorf("could not get peer addresses: %s", err))
}
peerMa := peerAddrs[0].String() + "/p2p/" + nodeA.Identity.String()
bootstrapNodes := []string{
// IPFS Bootstrapper nodes.
// In production, use real bootstrap peers like:
// "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
// "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
// "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
// "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
// IPFS Cluster Pinning nodes
// "/ip4/138.201.67.219/tcp/4001/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA",
// "/ip4/138.201.67.219/udp/4001/quic/p2p/QmUd6zHcbkbcs7SMxwLs48qZVX3vpcM8errYS7xEczwRMA",
// "/ip4/138.201.67.220/tcp/4001/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i",
// "/ip4/138.201.67.220/udp/4001/quic/p2p/QmNSYxZAiJHeLdkBg38roksAR9So7Y5eojks1yjEcUtZ7i",
// "/ip4/138.201.68.74/tcp/4001/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR",
// "/ip4/138.201.68.74/udp/4001/quic/p2p/QmdnXwLrC8p1ueiq2Qya8joNvk3TVVDAut7PrikmZwubtR",
// "/ip4/94.130.135.167/tcp/4001/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE",
// "/ip4/94.130.135.167/udp/4001/quic/p2p/QmUEMvxS2e7iDrereVYc5SWPauXPyNwxcy9BXZrC1QTcHE",
// You can add more nodes here, for example, another IPFS node you might have running locally, mine was:
// "/ip4/127.0.0.1/tcp/4010/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L",
// "/ip4/127.0.0.1/udp/4010/quic/p2p/QmZp2fhDLxjYue2RiUvLwT9MWdnbDxam32qYFnGmxZDh5L",
// For this example, we only connect to nodeA which has our test content.
peerMa,
}
go func() {
err := connectToPeers(ctx, ipfsB, bootstrapNodes)
if err != nil {
log.Printf("failed connect to peers: %s", err)
}
}()
fmt.Println("Connecting to peer...")
err = connectToPeers(ctx, ipfsB, bootstrapNodes)
if err != nil {
panic(fmt.Errorf("failed to connect to peers: %s", err))
}
fmt.Println("Connected to peer")
exampleCIDStr := peerCidFile.RootCid().String()

View File

@ -1,17 +1,39 @@
package main
import (
"bytes"
"io"
"os"
"os/exec"
"strings"
"testing"
"time"
)
func TestExample(t *testing.T) {
out, err := exec.Command("go", "run", "main.go").Output()
t.Log("Starting go run main.go...")
start := time.Now()
cmd := exec.Command("go", "run", "main.go")
cmd.Env = append(os.Environ(), "GOLOG_LOG_LEVEL=error") // reduce libp2p noise
// Stream output to both test log and capture buffer for verification
// This ensures we see progress even if the process is killed
var buf bytes.Buffer
cmd.Stdout = io.MultiWriter(os.Stdout, &buf)
cmd.Stderr = io.MultiWriter(os.Stderr, &buf)
err := cmd.Run()
elapsed := time.Since(start)
t.Logf("Command completed in %v", elapsed)
out := buf.String()
if err != nil {
t.Fatalf("running example (%v)", err)
t.Fatalf("running example (%v):\n%s", err, out)
}
if !strings.Contains(string(out), "All done!") {
t.Errorf("example did not run successfully")
if !strings.Contains(out, "All done!") {
t.Errorf("example did not complete successfully, output:\n%s", out)
}
}

View File

@ -199,9 +199,8 @@ configured, the daemon will fail to start.
## ipfs p2p
Allows tunneling of TCP connections through Libp2p streams. If you've ever used
port forwarding with SSH (the `-L` option in OpenSSH), this feature is quite
similar.
Allows tunneling of TCP connections through libp2p streams, similar to SSH port
forwarding (`ssh -L`).
### State
@ -220,98 +219,20 @@ Experimental, will be stabilized in 0.6.0
> If you enable this and plan to expose CLI or HTTP RPC to other users or machines,
> secure RPC API using [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations) or custom auth middleware.
The `p2p` command needs to be enabled in the config:
```sh
> ipfs config --json Experimental.Libp2pStreamMounting true
```
### How to use
**Netcat example:**
First, pick a protocol name for your application. Think of the protocol name as
a port number, just significantly more user-friendly. In this example, we're
going to use `/x/kickass/1.0`.
***Setup:***
1. A "server" node with peer ID `$SERVER_ID`
2. A "client" node.
***On the "server" node:***
First, start your application and have it listen for TCP connections on
port `$APP_PORT`.
Then, configure the p2p listener by running:
```sh
> ipfs p2p listen /x/kickass/1.0 /ip4/127.0.0.1/tcp/$APP_PORT
```
This will configure IPFS to forward all incoming `/x/kickass/1.0` streams to
`127.0.0.1:$APP_PORT` (opening a new connection to `127.0.0.1:$APP_PORT` per
incoming stream.
***On the "client" node:***
First, configure the client p2p dialer, so that it forwards all inbound
connections on `127.0.0.1:SOME_PORT` to the server node listening
on `/x/kickass/1.0`.
```sh
> ipfs p2p forward /x/kickass/1.0 /ip4/127.0.0.1/tcp/$SOME_PORT /p2p/$SERVER_ID
```
Next, have your application open a connection to `127.0.0.1:$SOME_PORT`. This
connection will be forwarded to the service running on `127.0.0.1:$APP_PORT` on
the remote machine. You can test it with netcat:
***On "server" node:***
```sh
> nc -v -l -p $APP_PORT
```
***On "client" node:***
```sh
> nc -v 127.0.0.1 $SOME_PORT
```
You should now see that a connection has been established and be able to
exchange messages between netcat instances.
(note that depending on your netcat version you may need to drop the `-v` flag)
**SSH example**
**Setup:**
1. A "server" node with peer ID `$SERVER_ID` and running ssh server on the
default port.
2. A "client" node.
_you can get `$SERVER_ID` by running `ipfs id -f "<id>\n"`_
***First, on the "server" node:***
```sh
ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22
```
***Then, on "client" node:***
```sh
ipfs p2p forward /x/ssh /ip4/127.0.0.1/tcp/2222 /p2p/$SERVER_ID
```
You should now be able to connect to your ssh server through a libp2p connection
with `ssh [user]@127.0.0.1 -p 2222`.
See [docs/p2p-tunnels.md](p2p-tunnels.md) for usage examples, foreground mode,
and systemd integration.
### Road to being a real feature
- [ ] More documentation
- [x] More documentation
- [x] `ipfs p2p forward` mode
- [ ] Ability to define tunnels via JSON config, similar to [`Peering.Peers`](https://github.com/ipfs/kubo/blob/master/docs/config.md#peeringpeers), see [kubo#5460](https://github.com/ipfs/kubo/issues/5460)
## p2p http proxy
@ -454,6 +375,8 @@ kubo now automatically shards when directory block is bigger than 256KB, ensurin
## IPNS pubsub
Specification: [IPNS PubSub Router](https://specs.ipfs.tech/ipns/ipns-pubsub-router/)
### In Version
0.4.14 :
@ -468,13 +391,18 @@ kubo now automatically shards when directory block is bigger than 256KB, ensurin
0.11.0 :
- Can be enabled via `Ipns.UsePubsub` flag in config
0.40.0 :
- Persistent message sequence number validation to prevent message cycles
in large networks
### State
Experimental, default-disabled.
Utilizes pubsub for publishing ipns records in real time.
Utilizes pubsub for publishing IPNS records in real time.
When it is enabled:
- IPNS publishers push records to a name-specific pubsub topic,
in addition to publishing to the DHT.
- IPNS resolvers subscribe to the name-specific topic on first
@ -483,9 +411,6 @@ When it is enabled:
Both the publisher and the resolver nodes need to have the feature enabled for it to work effectively.
Note: While IPNS pubsub has been available since 0.4.14, it received major changes in 0.5.0.
Users interested in this feature should upgrade to at least 0.5.0
### How to enable
Run your daemon with the `--enable-namesys-pubsub` flag
@ -495,13 +420,12 @@ ipfs config --json Ipns.UsePubsub true
```
NOTE:
- This feature implicitly enables [ipfs pubsub](#ipfs-pubsub).
- This feature implicitly enables pubsub.
- Passing `--enable-namesys-pubsub` CLI flag overrides `Ipns.UsePubsub` config.
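Once both nodes have the feature enabled, the regular `ipfs name` commands exercise the pubsub path; a minimal sketch (CID and peer ID are placeholders):

```console
# On the publishing node:
$ ipfs name publish /ipfs/<cid>

# On a resolving node that is connected to the publisher:
$ ipfs name resolve /ipns/<publisher-peer-id>
```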
### Road to being a real feature
- [ ] Needs more people to use and report on how well it works
- [ ] Pubsub enabled as a real feature
## AutoRelay

View File

@ -6,7 +6,7 @@ they were stored in a traditional web server.
[More about Gateways](https://docs.ipfs.tech/concepts/ipfs-gateway/) and [addressing IPFS on the web](https://docs.ipfs.tech/how-to/address-ipfs-on-web/).
Kubo's Gateway implementation follows [ipfs/specs: Specification for HTTP Gateways](https://github.com/ipfs/specs/tree/main/http-gateways#readme).
Kubo's Gateway implementation follows [IPFS Gateway Specifications](https://specs.ipfs.tech/http-gateways/) and is tested with [Gateway Conformance Test Suite](https://github.com/ipfs/gateway-conformance).
### Local gateway
@ -14,14 +14,21 @@ By default, Kubo nodes run
a [path gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#path-gateway) at `http://127.0.0.1:8080/`
and a [subdomain gateway](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway) at `http://localhost:8080/`.
The path gateway also implements the [trustless gateway spec](https://specs.ipfs.tech/http-gateways/trustless-gateway/)
and supports [trustless responses](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) as an opt-in via the `Accept` header.
> [!CAUTION]
> **For browsing websites, web apps, and dapps in a browser, use the subdomain
> gateway** (`localhost`). Each content root gets its own
> [web origin](https://developer.mozilla.org/en-US/docs/Web/Security/Same-origin_policy),
> isolating localStorage, cookies, and session data between sites.
>
> **For file retrieval, use the path gateway** (`127.0.0.1`). Path gateways are
> suited for downloading files or fetching [verifiable](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval)
> content, but lack origin isolation (all content shares the same origin).
Additional listening addresses and gateway behaviors can be set in the [config](#configuration) file.
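As a quick sanity check of the opt-in trustless behavior mentioned above, a raw block can be requested from the local path gateway with an explicit `Accept` header; a minimal sketch, where `<cid>` is a placeholder you would substitute with a real CID:

```console
$ curl -s -H "Accept: application/vnd.ipld.raw" "http://127.0.0.1:8080/ipfs/<cid>" -o block.bin
```

The response is the verbatim block identified by the CID, which a client can verify locally by re-hashing it.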
### Public gateways
Protocol Labs provides a public gateway at
The IPFS Foundation [provides public gateways](https://docs.ipfs.tech/concepts/public-utilities/) at
`https://ipfs.io` ([path](https://specs.ipfs.tech/http-gateways/path-gateway/)),
`https://dweb.link` ([subdomain](https://docs.ipfs.tech/how-to/address-ipfs-on-web/#subdomain-gateway)),
and `https://trustless-gateway.link` ([trustless](https://specs.ipfs.tech/http-gateways/trustless-gateway/) only).
@ -41,6 +48,82 @@ The gateway's log level can be changed with this command:
> ipfs log level core/server debug
```
## Running in Production
When deploying Kubo's gateway in production, be aware of these important considerations:
<a id="reverse-proxy"></a>
> [!IMPORTANT]
> **Reverse Proxy:** When running Kubo behind a reverse proxy (such as nginx),
> the original `Host` header **must** be forwarded to Kubo for
> [`Gateway.PublicGateways`](config.md#gatewaypublicgateways) to work.
> Kubo uses the `Host` header to match configured hostnames and detect
> subdomain gateway patterns like `{cid}.ipfs.example.org` or DNSLink hostnames.
>
> If the `Host` header is not forwarded correctly, Kubo will not recognize
> the configured gateway hostnames and requests may be handled incorrectly.
>
> If `X-Forwarded-Proto` is not set, redirects on HTTPS deployments will use the wrong protocol,
> and DNSLink names will not be inlined for subdomain gateways.
>
> Example: minimal nginx configuration for `example.org`
>
> ```nginx
> server {
> listen 80;
> listen [::]:80;
>
> # IMPORTANT: Include wildcard to match subdomain gateway requests.
> # The dot prefix matches both apex domain and all subdomains.
> server_name .example.org;
>
> location / {
> proxy_pass http://127.0.0.1:8080;
>
> # IMPORTANT: Forward the original Host header to Kubo.
> # Without this, PublicGateways configuration will not work.
> proxy_set_header Host $host;
>
> # IMPORTANT: X-Forwarded-Proto is required for correct behavior:
> # - Redirects will use https:// URLs when set to "https"
> # - DNSLink names will be inlined for subdomain gateways
> # (e.g., /ipns/en.wikipedia-on-ipfs.org → en-wikipedia--on--ipfs-org.ipns.example.org)
> proxy_set_header X-Forwarded-Proto $scheme;
> proxy_set_header X-Forwarded-Host $host;
> }
> }
> ```
>
> Common mistakes to avoid:
>
> - **Missing wildcard in `server_name`:** Using only `server_name example.org;`
> will not match subdomain requests like `{cid}.ipfs.example.org`. Always
> include `*.example.org` or use the dot prefix `.example.org`.
>
> - **Wrong `Host` header value:** Using `proxy_set_header Host $proxy_host;`
> sends the backend's hostname (e.g., `127.0.0.1:8080`) instead of the
> original `Host` header. Always use `$host` or `$http_host`.
>
> - **Missing `Host` header entirely:** If `proxy_set_header Host` is not
> specified, nginx defaults to `$proxy_host`, which breaks gateway routing.
> [!IMPORTANT]
> **Timeouts:** Configure [`Gateway.RetrievalTimeout`](config.md#gatewayretrievaltimeout)
> to terminate stalled transfers (resets on each data write, catches unresponsive operations),
> and [`Gateway.MaxRequestDuration`](config.md#gatewaymaxrequestduration) as a fallback
> deadline (default: 1 hour, catches cases when other timeouts are misconfigured or fail to fire).
> [!IMPORTANT]
> **Rate Limiting:** Use [`Gateway.MaxConcurrentRequests`](config.md#gatewaymaxconcurrentrequests)
> to protect against traffic spikes.
> [!IMPORTANT]
> **CDN/Cloudflare:** If using Cloudflare or other CDNs with
> [deserialized responses](config.md#gatewaydeserializedresponses) enabled, review
> [`Gateway.MaxRangeRequestFileSize`](config.md#gatewaymaxrangerequestfilesize) to avoid
> excess bandwidth billing from range request bugs. Cloudflare users may need additional
> protection via [Cloudflare Snippets](https://github.com/ipfs/boxo/issues/856#issuecomment-3523944976).
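The settings above live in the regular Kubo config and can be adjusted with `ipfs config`; a rough sketch, with purely illustrative values that should be tuned per deployment (see the linked config docs for exact types and defaults):

```console
$ ipfs config Gateway.RetrievalTimeout 2m
$ ipfs config Gateway.MaxRequestDuration 30m
$ ipfs config --json Gateway.MaxConcurrentRequests 4096
```

Restart the daemon after changing these values for them to take effect.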
## Directories
For convenience, the gateway (mostly) acts like a normal web-server when serving
@ -53,7 +136,7 @@ a directory:
2. Dynamically build and serve a listing of the contents of the directory.
<sub><sup>&dagger;</sup>This redirect is skipped if the query string contains a
`go-get=1` parameter. See [PR#3964](https://github.com/ipfs/kubo/pull/3963)
`go-get=1` parameter. See [PR#3963](https://github.com/ipfs/kubo/pull/3963)
for details</sub>
## Static Websites
@ -107,10 +190,12 @@ This is equivalent of `ipfs block get`.
### `application/vnd.ipld.car`
Returns a [CAR](https://ipld.io/specs/transport/car/) stream for specific DAG and selector.
Returns a [CAR](https://ipld.io/specs/transport/car/) stream for a DAG or a subset of it.
Right now only 'full DAG' implicit selector is implemented.
Support for user-provided IPLD selectors is tracked in https://github.com/ipfs/kubo/issues/8769.
The `dag-scope` parameter controls which blocks are included: `all` (default, entire DAG),
`entity` (logical unit like a file), or `block` (single block). For [UnixFS](https://specs.ipfs.tech/unixfs/) files,
`entity-bytes` enables byte range requests. See [IPIP-402](https://specs.ipfs.tech/ipips/ipip-0402/)
for details.
This is a rough equivalent of `ipfs dag export`.
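For example, under the assumptions above (placeholder CID, `dag-scope` passed as a query parameter as described in IPIP-402), a scoped CAR export might look like:

```console
$ curl -s -H "Accept: application/vnd.ipld.car" "http://127.0.0.1:8080/ipfs/<cid>?dag-scope=entity" -o entity.car
```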

View File

@ -1,12 +0,0 @@
#!/bin/bash
set -e
# see also ".mailmap" for how email addresses and names are deduplicated
cat >AUTHORS <<-'EOF'
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `docs/generate-authors.sh`.
EOF
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf >>AUTHORS

214
docs/p2p-tunnels.md Normal file
View File

@ -0,0 +1,214 @@
# P2P Tunnels
Kubo supports tunneling TCP connections through libp2p streams, similar to SSH
port forwarding (`ssh -L`). This allows exposing local services to remote peers
and forwarding remote services to local ports.
- [Why P2P Tunnels?](#why-p2p-tunnels)
- [Quick Start](#quick-start)
- [Background Mode](#background-mode)
- [Foreground Mode](#foreground-mode)
- [systemd Integration](#systemd-integration)
- [Security Considerations](#security-considerations)
- [Troubleshooting](#troubleshooting)
## Why P2P Tunnels?
Compared to traditional SSH tunnels, libp2p-based tunnels have several advantages:
- **No public IP or open ports**: The server does not need a static IP address
or port forwarding configured on the router. Connectivity to peers behind NAT
is facilitated by [Direct Connection Upgrade through Relay (DCUtR)](https://github.com/libp2p/specs/blob/master/relay/DCUtR.md),
which enables NAT hole-punching.
- **No DNS or IP address management**: All you need is the server's PeerID and
an agreed-upon protocol name (e.g., `/x/ssh`). Kubo handles peer discovery
and routing via the [Amino DHT](https://specs.ipfs.tech/routing/kad-dht/).
- **Simplified firewall rules**: Since connections are established through
libp2p's existing swarm connections, no additional firewall configuration is
needed beyond what Kubo already requires.
This makes p2p tunnels useful for connecting to machines on home networks,
behind corporate firewalls, or in environments where traditional port forwarding
is not available.
## Quick Start
Enable the experimental feature:
```console
$ ipfs config --json Experimental.Libp2pStreamMounting true
```
Test with netcat (`nc`) - no services required:
**On the server:**
```console
$ ipfs p2p listen /x/test /ip4/127.0.0.1/tcp/9999
$ nc -l -p 9999
```
**On the client:**
Replace `$SERVER_ID` with the server's peer ID (get it with `ipfs id -f "<id>\n"`
on the server).
```console
$ ipfs p2p forward /x/test /ip4/127.0.0.1/tcp/9998 /p2p/$SERVER_ID
$ nc 127.0.0.1 9998
```
Type in either terminal and the text appears in the other. Use Ctrl+C to exit.
## Background Mode
By default, `ipfs p2p listen` and `ipfs p2p forward` register the tunnel with
the daemon and return immediately. The tunnel persists until explicitly closed
with `ipfs p2p close` or the daemon shuts down.
This example exposes a local SSH server (listening on `localhost:22`) to a
remote peer. The same pattern works for any TCP service.
**On the server** (the machine running SSH):
Register a p2p listener that forwards incoming connections to the local SSH
server. The protocol name `/x/ssh` is an arbitrary identifier that both peers
must agree on (the `/x/` prefix is required for custom protocols).
```console
$ ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22
```
**On the client:**
Create a local listener on port `2222` that tunnels through libp2p to the server's SSH
service.
```console
$ ipfs p2p forward /x/ssh /ip4/127.0.0.1/tcp/2222 /p2p/$SERVER_ID
```
Now connect to SSH through the tunnel:
```console
$ ssh user@127.0.0.1 -p 2222
```
**Other services:** To tunnel a different service, change the port and protocol
name. For example, to expose a web server on port 8080, use `/x/mywebapp` and
`/ip4/127.0.0.1/tcp/8080`.
## Foreground Mode
Use `--foreground` (`-f`) to block until interrupted. The tunnel is
automatically removed when the command exits:
```console
$ ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 --foreground
Listening on /x/ssh, forwarding to /ip4/127.0.0.1/tcp/22, waiting for interrupt...
^C
Received interrupt, removing listener for /x/ssh
```
The listener/forwarder is automatically removed when:
- The command receives Ctrl+C or SIGTERM
- `ipfs p2p close` is called
- The daemon shuts down
This mode is useful for systemd services and scripts that need cleanup on exit.
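For reference, registered tunnels can be inspected and removed by protocol name; the flags below follow the built-in help, so double-check `ipfs p2p close --help` on your Kubo version:
```console
$ ipfs p2p ls
$ ipfs p2p close -p /x/ssh
```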
### systemd Integration
The `--foreground` flag enables clean integration with systemd. The examples
below show how to run `ipfs p2p listen` as a user service that starts
automatically when the IPFS daemon is ready.
Ensure the IPFS daemon runs as a systemd user service. See
[misc/README.md](https://github.com/ipfs/kubo/blob/master/misc/README.md#systemd)
for setup instructions and where to place unit files.
#### P2P listener with path-based activation
Use a `.path` unit to wait for the daemon's RPC API to be ready before starting
the p2p listener.
**`ipfs-p2p-tunnel.path`**:
```systemd
[Unit]
Description=Monitor for IPFS daemon startup
After=ipfs.service
Requires=ipfs.service
[Path]
PathExists=%h/.ipfs/api
Unit=ipfs-p2p-tunnel.service
[Install]
WantedBy=default.target
```
The `%h` specifier expands to the user's home directory. If you use a custom
`IPFS_PATH`, adjust accordingly.
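As a sketch, assuming a hypothetical repo at `/srv/ipfs`: adjust the path unit to watch that repo's `api` file, and export `IPFS_PATH` in the tunnel service so the CLI talks to the right daemon:
```systemd
# In ipfs-p2p-tunnel.path:
[Path]
PathExists=/srv/ipfs/api
Unit=ipfs-p2p-tunnel.service

# In ipfs-p2p-tunnel.service:
[Service]
Environment=IPFS_PATH=/srv/ipfs
ExecStart=ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 -f
```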
**`ipfs-p2p-tunnel.service`**:
```systemd
[Unit]
Description=IPFS p2p tunnel
Requires=ipfs.service
[Service]
ExecStart=ipfs p2p listen /x/ssh /ip4/127.0.0.1/tcp/22 -f
Restart=on-failure
RestartSec=10
[Install]
WantedBy=default.target
```
#### Enabling the services
```console
$ systemctl --user enable ipfs.service
$ systemctl --user enable ipfs-p2p-tunnel.path
$ systemctl --user start ipfs.service
```
The path unit monitors `~/.ipfs/api` and starts `ipfs-p2p-tunnel.service`
once the file exists.
## Security Considerations
> [!WARNING]
> This feature provides CLI and HTTP RPC users with the ability to set up port
> forwarding for localhost and LAN ports. If you enable this and plan to expose
> CLI or HTTP RPC to other users or machines, secure the RPC API using
> [`API.Authorizations`](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations)
> or custom auth middleware.
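As an illustrative sketch (the entry name `tunnel-admin` and the token are placeholders; see the linked config documentation for the authoritative schema):
```console
$ ipfs config --json API.Authorizations '{"tunnel-admin": {"AuthSecret": "bearer:CHANGE_ME", "AllowedPaths": ["/api/v0"]}}'
```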
## Troubleshooting
### Foreground listener stops when terminal closes
When using `--foreground`, the listener stops if the terminal closes. For
persistent foreground listeners, use a systemd service, `nohup`, `tmux`, or
`screen`. Without `--foreground`, the listener persists in the daemon regardless
of terminal state.
### Connection refused errors
Verify:
1. The experimental feature is enabled: `ipfs config Experimental.Libp2pStreamMounting`
2. The listener is active: `ipfs p2p ls`
3. Both peers can connect: `ipfs swarm connect /p2p/$PEER_ID`
### Persistent tunnel configuration
There is currently no way to define tunnels in the Kubo JSON config file. Use
`--foreground` mode with a systemd service for persistent tunnels. Support for
configuring tunnels via JSON config may be added in the future (see [kubo#5460](https://github.com/ipfs/kubo/issues/5460) - PRs welcome!).

97
go.mod
View File

@ -11,35 +11,34 @@ require (
github.com/cenkalti/backoff/v4 v4.3.0
github.com/ceramicnetwork/go-dag-jose v0.1.1
github.com/cheggaaa/pb v1.0.29
github.com/cockroachdb/pebble/v2 v2.1.2
github.com/coreos/go-systemd/v22 v22.5.0
github.com/cockroachdb/pebble/v2 v2.1.4
github.com/coreos/go-systemd/v22 v22.7.0
github.com/dustin/go-humanize v1.0.1
github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5
github.com/filecoin-project/go-clock v0.1.0
github.com/fsnotify/fsnotify v1.7.0
github.com/fsnotify/fsnotify v1.9.0
github.com/google/uuid v1.6.0
github.com/hashicorp/go-version v1.7.0
github.com/hashicorp/go-version v1.8.0
github.com/ipfs-shipyard/nopfs v0.0.14
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0
github.com/ipfs/boxo v0.35.3-0.20251203042105-52a05615b40f
github.com/ipfs/boxo v0.36.1-0.20260205181019-5060c8dbda85
github.com/ipfs/go-block-format v0.2.3
github.com/ipfs/go-cid v0.6.0
github.com/ipfs/go-cidutil v0.1.0
github.com/ipfs/go-datastore v0.9.0
github.com/ipfs/go-detect-race v0.0.1
github.com/ipfs/go-ds-badger v0.3.4
github.com/ipfs/go-ds-flatfs v0.5.5
github.com/ipfs/go-ds-flatfs v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.2
github.com/ipfs/go-ds-measure v0.2.2
github.com/ipfs/go-ds-pebble v0.5.7
github.com/ipfs/go-ds-pebble v0.5.9
github.com/ipfs/go-fs-lock v0.1.1
github.com/ipfs/go-ipfs-cmds v0.15.0
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260204204540-af9bcbaf5709
github.com/ipfs/go-ipld-cbor v0.2.1
github.com/ipfs/go-ipld-format v0.6.3
github.com/ipfs/go-ipld-git v0.1.1
github.com/ipfs/go-ipld-legacy v0.2.2
github.com/ipfs/go-log/v2 v2.9.0
github.com/ipfs/go-log/v2 v2.9.1
github.com/ipfs/go-metrics-interface v0.3.0
github.com/ipfs/go-metrics-prometheus v0.1.0
github.com/ipfs/go-test v0.2.3
@ -47,23 +46,23 @@ require (
github.com/ipld/go-car/v2 v2.16.0
github.com/ipld/go-codec-dagpb v1.7.0
github.com/ipld/go-ipld-prime v0.21.0
github.com/ipshipyard/p2p-forge v0.6.1
github.com/ipshipyard/p2p-forge v0.7.0
github.com/jbenet/go-temp-err-catcher v0.1.0
github.com/julienschmidt/httprouter v1.3.0
github.com/libp2p/go-doh-resolver v0.5.0
github.com/libp2p/go-libp2p v0.45.0
github.com/libp2p/go-libp2p v0.47.0
github.com/libp2p/go-libp2p-http v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.36.0
github.com/libp2p/go-libp2p-kad-dht v0.37.1
github.com/libp2p/go-libp2p-kbucket v0.8.0
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/libp2p/go-libp2p-pubsub v0.15.0
github.com/libp2p/go-libp2p-pubsub-router v0.6.0
github.com/libp2p/go-libp2p-record v0.3.1
github.com/libp2p/go-libp2p-routing-helpers v0.7.5
github.com/libp2p/go-libp2p-testing v0.12.0
github.com/libp2p/go-socket-activation v0.1.1
github.com/miekg/dns v1.1.68
github.com/miekg/dns v1.1.72
github.com/multiformats/go-multiaddr v0.16.1
github.com/multiformats/go-multiaddr-dns v0.4.1
github.com/multiformats/go-multiaddr-dns v0.5.0
github.com/multiformats/go-multibase v0.2.0
github.com/multiformats/go-multicodec v0.10.0
github.com/multiformats/go-multihash v0.2.3
@ -73,34 +72,34 @@ require (
github.com/prometheus/client_golang v1.23.2
github.com/stretchr/testify v1.11.1
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
github.com/tidwall/gjson v1.16.0
github.com/tidwall/gjson v1.18.0
github.com/tidwall/sjson v1.2.5
github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
go.opencensus.io v0.24.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
go.opentelemetry.io/contrib/propagators/autoprop v0.46.1
go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel v1.39.0
go.opentelemetry.io/otel/exporters/prometheus v0.56.0
go.opentelemetry.io/otel/sdk v1.38.0
go.opentelemetry.io/otel/sdk/metric v1.38.0
go.opentelemetry.io/otel/trace v1.38.0
go.opentelemetry.io/otel/trace v1.39.0
go.uber.org/dig v1.19.0
go.uber.org/fx v1.24.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.45.0
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39
golang.org/x/mod v0.30.0
golang.org/x/sync v0.18.0
golang.org/x/sys v0.38.0
google.golang.org/protobuf v1.36.10
go.uber.org/zap v1.27.1
golang.org/x/crypto v0.47.0
golang.org/x/exp v0.0.0-20260112195511-716be5621a96
golang.org/x/mod v0.32.0
golang.org/x/sync v0.19.0
golang.org/x/sys v0.40.0
google.golang.org/protobuf v1.36.11
)
require (
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
github.com/DataDog/zstd v1.5.7 // indirect
github.com/Jorropo/jsync v1.0.1 // indirect
github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 // indirect
github.com/RaduBerinde/axisds v0.1.0 // indirect
github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect
@ -114,7 +113,7 @@ require (
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect
github.com/cskr/pubsub v1.0.2 // indirect
@ -124,11 +123,12 @@ require (
github.com/dgraph-io/badger v1.6.2 // indirect
github.com/dgraph-io/ristretto v0.0.2 // indirect
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/filecoin-project/go-clock v0.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/gabriel-vasile/mimetype v1.4.12 // indirect
github.com/gammazero/chanqueue v1.1.1 // indirect
github.com/gammazero/deque v1.2.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
@ -151,11 +151,11 @@ require (
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-dsqueue v0.1.1 // indirect
github.com/ipfs/go-dsqueue v0.1.2 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-pq v0.0.4 // indirect
github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
github.com/ipfs/go-peertaskqueue v0.8.2 // indirect
github.com/ipfs/go-peertaskqueue v0.8.3 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
@ -170,7 +170,7 @@ require (
github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect
github.com/libp2p/go-libp2p-xor v0.1.0 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.3.0 // indirect
github.com/libp2p/go-netroute v0.4.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
@ -220,9 +220,9 @@ require (
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/prometheus/statsd_exporter v0.27.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.55.0 // indirect
github.com/quic-go/webtransport-go v0.9.0 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.59.0 // indirect
github.com/quic-go/webtransport-go v0.10.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/rs/cors v1.11.1 // indirect
@ -249,22 +249,22 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.33.0 // indirect
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/tools v0.41.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gonum.org/v1/gonum v0.16.0 // indirect
gonum.org/v1/gonum v0.17.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/grpc v1.75.0 // indirect
@ -272,3 +272,10 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)
// Exclude ancient +incompatible versions that confuse Dependabot.
// These pre-Go-modules versions reference packages that no longer exist.
exclude (
github.com/ipfs/go-ipfs-cmds v2.0.1+incompatible
github.com/libp2p/go-libp2p v6.0.23+incompatible
)

274
go.sum
View File

@ -1,9 +1,7 @@
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510=
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
@ -36,12 +34,7 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
@ -53,8 +46,8 @@ github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU=
github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657 h1:8XBWWQD+vFF+JqOsm16t0Kab1a7YWV8+GISVEP8AuZ8=
github.com/RaduBerinde/axisds v0.0.0-20250419182453-5135a0650657/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y=
github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8=
github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y=
github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk=
github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc=
github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo=
@ -81,7 +74,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@ -91,7 +83,6 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU=
github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4=
github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
@ -126,21 +117,20 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/pebble/v2 v2.1.2 h1:IwYt+Y2Cdw6egblwk1kWzdmJvD2680t5VK/3i0BJ6IA=
github.com/cockroachdb/pebble/v2 v2.1.2/go.mod h1:Aza05DCCc05ghIJZkB4Q/axv/JK9wx5cFwWcnhG0eGw=
github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk=
github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI=
github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4=
@ -167,6 +157,8 @@ github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70d
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54=
github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@ -189,18 +181,16 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjr
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw=
github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gammazero/chanqueue v1.1.1 h1:n9Y+zbBxw2f7uUE9wpgs0rOSkP/I/yhDLiNuhyVjojQ=
github.com/gammazero/chanqueue v1.1.1/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
github.com/gammazero/deque v1.2.0 h1:scEFO8Uidhw6KDU5qg1HA5fYwM0+us2qdeJqm43bitU=
@ -209,10 +199,7 @@ github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@ -240,7 +227,6 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@ -253,7 +239,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@ -298,8 +283,6 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
@ -318,8 +301,6 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -329,16 +310,14 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw=
github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
@ -358,8 +337,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.35.3-0.20251203042105-52a05615b40f h1:QaTdH/7Bi3wPYtwRs9b7cKguT2h2al1ZLYE+ziFYqEg=
github.com/ipfs/boxo v0.35.3-0.20251203042105-52a05615b40f/go.mod h1:Abmp1if6bMQG87/0SQPIB9fkxJnZMLCt2nQw3yUZHH0=
github.com/ipfs/boxo v0.36.1-0.20260205181019-5060c8dbda85 h1:zBxLtBiJxVTdntQZUogxjjy4FIjMnKdWGXSTR104AnY=
github.com/ipfs/boxo v0.36.1-0.20260205181019-5060c8dbda85/go.mod h1:92hnRXfP5ScKEIqlq9Ns7LR1dFXEVADKWVGH0fjk83k=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
@ -381,28 +360,28 @@ github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46U
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo=
github.com/ipfs/go-ds-badger v0.3.4/go.mod h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8=
github.com/ipfs/go-ds-flatfs v0.5.5 h1:lkx5C99pFBMI7T1sYF7y3v7xIYekNVNMp/95Gm9Y3tY=
github.com/ipfs/go-ds-flatfs v0.5.5/go.mod h1:bM7+m7KFUyv5dp3RBKTr3+OHgZ6h8ydCQkO7tjeO9Z4=
github.com/ipfs/go-ds-flatfs v0.6.0 h1:olAEnDNBK1VMoTRZvfzgo90H5kBP4qIZPpYMtNlBBws=
github.com/ipfs/go-ds-flatfs v0.6.0/go.mod h1:p8a/YhmAFYyuonxDbvuIANlDCgS69uqVv+iH5f8fAxY=
github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
github.com/ipfs/go-ds-leveldb v0.5.2 h1:6nmxlQ2zbp4LCNdJVsmHfs9GP0eylfBNxpmY1csp0x0=
github.com/ipfs/go-ds-leveldb v0.5.2/go.mod h1:2fAwmcvD3WoRT72PzEekHBkQmBDhc39DJGoREiuGmYo=
github.com/ipfs/go-ds-measure v0.2.2 h1:4kwvBGbbSXNYe4ANlg7qTIYoZU6mNlqzQHdVqICkqGI=
github.com/ipfs/go-ds-measure v0.2.2/go.mod h1:b/87ak0jMgH9Ylt7oH0+XGy4P8jHx9KG09Qz+pOeTIs=
github.com/ipfs/go-ds-pebble v0.5.7 h1:4PQI46y3fjjxUTgHwYqcOVyoxiU6v1sqN6ONeRXGQTM=
github.com/ipfs/go-ds-pebble v0.5.7/go.mod h1:rsIgXE2qN+VfHKBin2cOOGFTZ/Agor6i8wBWA6ihbr0=
github.com/ipfs/go-dsqueue v0.1.1 h1:6PQlHDyf9PSTN69NmwUir5+0is3tU0vRJj8zLlgK8Mc=
github.com/ipfs/go-dsqueue v0.1.1/go.mod h1:Xxg353WSwwzYn3FGSzZ+taSQII3pIZ+EJC8/oWRDM10=
github.com/ipfs/go-ds-pebble v0.5.9 h1:D1FEuMxjbEmDADNqsyT74n9QHVAn12nv9i9Qa15AFYc=
github.com/ipfs/go-ds-pebble v0.5.9/go.mod h1:XmUBN05l6B+tMg7mpMS75ZcKW/CX01uZMhhWw85imQA=
github.com/ipfs/go-dsqueue v0.1.2 h1:jBMsgvT9Pj9l3cqI0m5jYpW/aWDYkW4Us6EuzrcSGbs=
github.com/ipfs/go-dsqueue v0.1.2/go.mod h1:OU94YuMVUIF/ctR7Ysov9PI4gOa2XjPGN9nd8imSv78=
github.com/ipfs/go-fs-lock v0.1.1 h1:TecsP/Uc7WqYYatasreZQiP9EGRy4ZnKoG4yXxR33nw=
github.com/ipfs/go-fs-lock v0.1.1/go.mod h1:2goSXMCw7QfscHmSe09oXiR34DQeUdm+ei+dhonqly0=
github.com/ipfs/go-ipfs-cmds v0.15.0 h1:nQDgKadrzyiFyYoZMARMIoVoSwe3gGTAfGvrWLeAQbQ=
github.com/ipfs/go-ipfs-cmds v0.15.0/go.mod h1:VABf/mv/wqvYX6hLG6Z+40eNAEw3FQO0bSm370Or3Wk=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260204204540-af9bcbaf5709 h1:0JiurWPnR7ZtjYW8XdfThOcOU5WlVVGQ1JY4FHHgyu8=
github.com/ipfs/go-ipfs-cmds v0.15.1-0.20260204204540-af9bcbaf5709/go.mod h1:yZeTCte5zTH66bbEpLPkSog3/ImppCD00DMP7NjYmys=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw=
github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo=
github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
github.com/ipfs/go-ipfs-pq v0.0.4 h1:U7jjENWJd1jhcrR8X/xHTaph14PTAK9O+yaLJbjqgOw=
github.com/ipfs/go-ipfs-pq v0.0.4/go.mod h1:9UdLOIIb99IFrgT0Fc53pvbvlJBhpUb4GJuAQf3+O2A=
github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk=
github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
@ -416,14 +395,14 @@ github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYD
github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ=
github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log/v2 v2.9.0 h1:l4b06AwVXwldIzbVPZy5z7sKp9lHFTX0KWfTBCtHaOk=
github.com/ipfs/go-log/v2 v2.9.0/go.mod h1:UhIYAwMV7Nb4ZmihUxfIRM2Istw/y9cAk3xaK+4Zs2c=
github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk=
github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo=
github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU=
github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY=
github.com/ipfs/go-metrics-prometheus v0.1.0 h1:bApWOHkrH3VTBHzTHrZSfq4n4weOZDzZFxUXv+HyKcA=
github.com/ipfs/go-metrics-prometheus v0.1.0/go.mod h1:2GtL525C/4yxtvSXpRJ4dnE45mCX9AS0XRa03vHx7G0=
github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU=
github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
github.com/ipfs/go-peertaskqueue v0.8.3 h1:tBPpGJy+A92RqtRFq5amJn0Uuj8Pw8tXi0X3eHfHM8w=
github.com/ipfs/go-peertaskqueue v0.8.3/go.mod h1:OqVync4kPOcXEGdj/LKvox9DCB5mkSBeXsPczCxLtYA=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
github.com/ipfs/go-unixfsnode v1.10.2 h1:TREegX1J4X+k1w4AhoDuxxFvVcS9SegMRvrmxF6Tca8=
@ -437,8 +416,8 @@ github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH
github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714 h1:cqNk8PEwHnK0vqWln+U/YZhQc9h2NB3KjUjDPZo5Q2s=
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20250821084354-a425e60cd714/go.mod h1:ZEUdra3CoqRVRYgAX/jAJO9aZGz6SKtKEG628fHHktY=
github.com/ipshipyard/p2p-forge v0.6.1 h1:987/hUC1YxI56CcMX6iTB+9BLjFV0d2SJnig9Z1pf8A=
github.com/ipshipyard/p2p-forge v0.6.1/go.mod h1:pj8Zcs+ex5OMq5a1bFLHqW0oL3qYO0v5eGLZmit0l7U=
github.com/ipshipyard/p2p-forge v0.7.0 h1:PQayexxZC1FR2Vx0XOSbmZ6wDPliidS48I+xXWuF+YU=
github.com/ipshipyard/p2p-forge v0.7.0/go.mod h1:i2wg0p7WmHGyo5vYaK9COZBp8BN5Drncfu3WoQNZlQY=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
@ -446,7 +425,6 @@ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABo
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@ -484,7 +462,6 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@ -504,8 +481,8 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.45.0 h1:Pdhr2HsFXaYjtfiNcBP4CcRUONvbMFdH3puM9vV4Tiw=
github.com/libp2p/go-libp2p v0.45.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc=
github.com/libp2p/go-libp2p v0.47.0/go.mod h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
@ -514,14 +491,14 @@ github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qk
github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA=
github.com/libp2p/go-libp2p-http v0.5.0 h1:+x0AbLaUuLBArHubbbNRTsgWz0RjNTy6DJLOxQ3/QBc=
github.com/libp2p/go-libp2p-http v0.5.0/go.mod h1:glh87nZ35XCQyFsdzZps6+F4HYI6DctVFY5u1fehwSg=
github.com/libp2p/go-libp2p-kad-dht v0.36.0 h1:7QuXhV36+Vyj+L6A7mrYkn2sYLrbRcbjvsYDu/gXhn8=
github.com/libp2p/go-libp2p-kad-dht v0.36.0/go.mod h1:O24LxTH9Rt3I5XU8nmiA9VynS4TrTwAyj+zBJKB05vQ=
github.com/libp2p/go-libp2p-kad-dht v0.37.1 h1:jtX8bQIXVCs6/allskNB4m5n95Xvwav7wHAhopGZfS0=
github.com/libp2p/go-libp2p-kad-dht v0.37.1/go.mod h1:Uwokdh232k9Y1uMy2yJOK5zb7hpMHn4P8uWS4s9i05Q=
github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o=
github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4=
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s=
github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE=
github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
@ -535,8 +512,8 @@ github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQ
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
@ -547,12 +524,10 @@ github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfY
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY=
github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -575,10 +550,9 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1f
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc=
github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI=
github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@ -617,8 +591,8 @@ github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw=
github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-dns v0.5.0 h1:p/FTyHKX0nl59f+S+dEUe8HRK+i5Ow/QHMw8Nh3gPCo=
github.com/multiformats/go-multiaddr-dns v0.5.0/go.mod h1:yJ349b8TPIAANUyuOzn1oz9o22tV9f+06L+cCeMxC14=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
@ -647,8 +621,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@ -669,7 +641,6 @@ github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
@ -734,7 +705,6 @@ github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/probe-lab/go-libdht v0.4.0 h1:LAqHuko/owRW6+0cs5wmJXbHzg09EUMJEh5DI37yXqo=
github.com/probe-lab/go-libdht v0.4.0/go.mod h1:hamw22kI6YkPQFGy5P6BrWWDrgE9ety5Si8iWAyuDvc=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@ -750,7 +720,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@ -759,7 +728,6 @@ github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJ
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@ -771,12 +739,12 @@ github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUO
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
github.com/prometheus/statsd_exporter v0.27.1 h1:tcRJOmwlA83HPfWzosAgr2+zEN5XDFv+M2mn/uYkn5Y=
github.com/prometheus/statsd_exporter v0.27.1/go.mod h1:vA6ryDfsN7py/3JApEst6nLTJboq66XsNcJGNmC88NQ=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI=
github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@ -789,30 +757,7 @@ github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@ -825,8 +770,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
@ -862,12 +805,11 @@ github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxm
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqajr6t1lOv8GyGE2U=
github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg=
github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
@ -881,8 +823,6 @@ github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpF
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE=
github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s=
github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y=
@ -920,7 +860,6 @@ github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
@ -944,8 +883,8 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 h1:f4beMGDKiVzg9IcX7/VuWV
go.opentelemetry.io/contrib/propagators/jaeger v1.21.1/go.mod h1:U9jhkEl8d1LL+QXY7q3kneJWJugiN3kZJV2OWz3hkBY=
go.opentelemetry.io/contrib/propagators/ot v1.21.1 h1:3TN5vkXjKYWp0YdMcnUEC/A+pBPvqz9V3nCS2xmcurk=
go.opentelemetry.io/contrib/propagators/ot v1.21.1/go.mod h1:oy0MYCbS/b3cqUDW37wBWtlwBIsutngS++Lklpgh+fc=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
@ -958,14 +897,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
@ -978,23 +917,19 @@ go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -1007,8 +942,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1019,11 +954,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39 h1:DHNhtq3sNNzrvduZZIiFyXWOL9IWaDPHqTnLJp+rCBY=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1044,19 +978,16 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -1097,20 +1028,17 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1125,19 +1053,17 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1194,10 +1120,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo=
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1206,8 +1132,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1219,18 +1145,15 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1278,8 +1201,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1287,11 +1210,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@ -1309,18 +1229,12 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -1353,9 +1267,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -1385,8 +1296,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1395,7 +1306,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8=
gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@ -1410,8 +1320,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -1426,5 +1334,3 @@ pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@ -39,6 +39,12 @@ To run this in your user session, save it as `~/.config/systemd/user/ipfs.servic
```
Read more about `--user` services here: [wiki.archlinux.org: Systemd/User](https://wiki.archlinux.org/index.php/Systemd/User#Automatic_start-up_of_systemd_user_instances)
#### P2P tunnel services
For running `ipfs p2p listen` or `ipfs p2p forward` as systemd services,
see [docs/p2p-tunnels.md](../docs/p2p-tunnels.md) for examples using the
`--foreground` flag and path-based activation.
### initd
- Here is a full-featured sample service file: https://github.com/dylanPowers/ipfs-linux-service/blob/master/init.d/ipfs

View File

@ -41,40 +41,57 @@ define go-build
$(GOCC) build $(go-flags-with-tags) -o "$@" "$(1)"
endef
test_go_test: $$(DEPS_GO)
$(GOCC) test $(go-flags-with-tags) $(GOTFLAGS) ./...
.PHONY: test_go_test
# Only disable colors when running in CI (non-interactive terminal)
GOTESTSUM_NOCOLOR := $(if $(CI),--no-color,)
# Build all platforms from .github/build-platforms.yml
# Packages excluded from coverage (test code and examples are not production code)
COVERPKG_EXCLUDE := /(test|docs/examples)/
# Packages excluded from unit tests: coverage exclusions + client/rpc (tested by test_cli)
UNIT_EXCLUDE := /(test|docs/examples)/|/client/rpc$$
# Unit tests with coverage
# Produces JSON for CI reporting and coverage profile for Codecov
test_unit: test/bin/gotestsum $$(DEPS_GO)
mkdir -p test/unit coverage
rm -f test/unit/gotest.json coverage/unit_tests.coverprofile
gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/unit/gotest.json -- $(go-flags-with-tags) $(GOTFLAGS) -covermode=atomic -coverprofile=coverage/unit_tests.coverprofile -coverpkg=$$($(GOCC) list $(go-tags) ./... | grep -vE '$(COVERPKG_EXCLUDE)' | tr '\n' ',' | sed 's/,$$//') $$($(GOCC) list $(go-tags) ./... | grep -vE '$(UNIT_EXCLUDE)')
.PHONY: test_unit
# CLI/integration tests (requires built binary in PATH)
# Includes test/cli, test/integration, and client/rpc
# Produces JSON for CI reporting
# Override TEST_CLI_TIMEOUT for local development: make test_cli TEST_CLI_TIMEOUT=5m
TEST_CLI_TIMEOUT ?= 10m
test_cli: cmd/ipfs/ipfs test/bin/gotestsum $$(DEPS_GO)
mkdir -p test/cli
rm -f test/cli/cli-tests.json
PATH="$(CURDIR)/cmd/ipfs:$(CURDIR)/test/bin:$$PATH" gotestsum $(GOTESTSUM_NOCOLOR) --jsonfile test/cli/cli-tests.json -- -v -timeout=$(TEST_CLI_TIMEOUT) ./test/cli/... ./test/integration/... ./client/rpc/...
.PHONY: test_cli
# Example tests (docs/examples/kubo-as-a-library)
# Tests against both published and current kubo versions
# Uses timeout to ensure CI gets output before job-level timeout kills everything
TEST_EXAMPLES_TIMEOUT ?= 2m
test_examples:
cd docs/examples/kubo-as-a-library && go test -v -timeout=$(TEST_EXAMPLES_TIMEOUT) ./... && cp go.mod go.mod.bak && cp go.sum go.sum.bak && (go mod edit -replace github.com/ipfs/kubo=./../../.. && go mod tidy && go test -v -timeout=$(TEST_EXAMPLES_TIMEOUT) ./...; ret=$$?; mv go.mod.bak go.mod; mv go.sum.bak go.sum; exit $$ret)
.PHONY: test_examples
# Build kubo for all platforms from .github/build-platforms.yml
test_go_build:
bin/test-go-build-platforms
.PHONY: test_go_build
test_go_short: GOTFLAGS += -test.short
test_go_short: test_go_test
.PHONY: test_go_short
test_go_race: GOTFLAGS += -race
test_go_race: test_go_test
.PHONY: test_go_race
test_go_expensive: test_go_test test_go_build
.PHONY: test_go_expensive
TEST_GO += test_go_expensive
# Check Go source formatting
test_go_fmt:
bin/test-go-fmt
.PHONY: test_go_fmt
TEST_GO += test_go_fmt
# Run golangci-lint (used by CI)
test_go_lint: test/bin/golangci-lint
golangci-lint run --timeout=3m ./...
.PHONY: test_go_lint
test_go: $(TEST_GO)
# Version check is no longer needed - go.mod enforces minimum version
.PHONY: check_go_version
TEST_GO := test_go_fmt test_unit test_cli test_examples
TEST += $(TEST_GO)
TEST_SHORT += test_go_fmt test_go_short
TEST_SHORT += test_go_fmt test_unit

View File

@ -20,6 +20,10 @@ type Listener interface {
// close closes the listener. Does not affect child streams
close()
// Done returns a channel that is closed when the listener is closed.
// This allows callers to detect when a listener has been removed.
Done() <-chan struct{}
}
// Listeners manages a group of Listener implementations,
@ -73,15 +77,13 @@ func (r *Listeners) Register(l Listener) error {
return nil
}
// Close removes and closes all listeners for which matchFunc returns true.
// Returns the number of listeners closed.
func (r *Listeners) Close(matchFunc func(listener Listener) bool) int {
todo := make([]Listener, 0)
var todo []Listener
r.Lock()
for _, l := range r.Listeners {
if !matchFunc(l) {
continue
}
if _, ok := r.Listeners[l.key()]; ok {
if matchFunc(l) {
delete(r.Listeners, l.key())
todo = append(todo, l)
}
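The Done() channel added to the Listener interface pairs with the reworked Close above: when matchFunc selects a listener, it is deleted from the registry and its close() runs, which in turn closes the done channel. A minimal caller-side sketch of that hand-off, assuming the package lives at github.com/ipfs/kubo/p2p and that Protocol() remains part of the Listener interface; the helper name and timeout are illustrative and not part of this change:

package p2pexample

import (
	"time"

	p2p "github.com/ipfs/kubo/p2p"
)

// waitForClose is a hypothetical helper: it closes every listener that shares
// l's protocol and then waits briefly for l itself to signal teardown via Done().
func waitForClose(reg *p2p.Listeners, l p2p.Listener) bool {
	reg.Close(func(candidate p2p.Listener) bool {
		return candidate.Protocol() == l.Protocol()
	})
	select {
	case <-l.Done(): // closed by close() once the listener is removed
		return true
	case <-time.After(5 * time.Second): // illustrative guard, not part of the diff
		return false
	}
}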

View File

@ -23,6 +23,7 @@ type localListener struct {
peer peer.ID
listener manet.Listener
done chan struct{}
}
// ForwardLocal creates a new P2P stream to a remote listener.
@ -32,6 +33,7 @@ func (p2p *P2P) ForwardLocal(ctx context.Context, peer peer.ID, proto protocol.I
p2p: p2p,
proto: proto,
peer: peer,
done: make(chan struct{}),
}
maListener, err := manet.Listen(bindAddr)
@ -98,6 +100,11 @@ func (l *localListener) setupStream(local manet.Conn) {
func (l *localListener) close() {
l.listener.Close()
close(l.done)
}
func (l *localListener) Done() <-chan struct{} {
return l.done
}
func (l *localListener) Protocol() protocol.ID {

View File

@ -25,6 +25,8 @@ type remoteListener struct {
// reportRemote if set to true makes the handler send '<base58 remote peerid>\n'
// to target before any data is forwarded
reportRemote bool
done chan struct{}
}
// ForwardRemote creates a new p2p listener.
@ -36,6 +38,7 @@ func (p2p *P2P) ForwardRemote(ctx context.Context, proto protocol.ID, addr ma.Mu
addr: addr,
reportRemote: reportRemote,
done: make(chan struct{}),
}
if err := p2p.ListenersP2P.Register(listener); err != nil {
@ -99,7 +102,13 @@ func (l *remoteListener) TargetAddress() ma.Multiaddr {
return l.addr
}
func (l *remoteListener) close() {}
func (l *remoteListener) close() {
close(l.done)
}
func (l *remoteListener) Done() <-chan struct{} {
return l.done
}
func (l *remoteListener) key() protocol.ID {
return l.proto

View File

@ -31,6 +31,13 @@ import (
var log = logging.Logger("routing/delegated")
// Parse creates a composed router from the custom routing configuration.
//
// EXPERIMENTAL: Custom routing (Routing.Type=custom with Routing.Routers and
// Routing.Methods) is for research and testing only, not production use.
// The configuration format and behavior may change without notice between
// releases. HTTP-only configurations cannot reliably provide content.
// See docs/delegated-routing.md for limitations.
func Parse(routers config.Routers, methods config.Methods, extraDHT *ExtraDHTParams, extraHTTP *ExtraHTTPParams) (routing.Routing, error) {
if err := methods.Check(); err != nil {
return nil, err
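For orientation, a minimal sketch of how a caller might hand this configuration to Parse, assuming the package is imported as irouting from github.com/ipfs/kubo/routing and that cfg.Routing.Routers and cfg.Routing.Methods carry the custom routing settings; the helper name and parameter plumbing are illustrative only:

package routingexample

import (
	"github.com/ipfs/kubo/config"
	irouting "github.com/ipfs/kubo/routing"
	"github.com/libp2p/go-libp2p/core/routing"
)

// buildCustomRouter is a hypothetical helper; it is only meaningful when
// Routing.Type is set to "custom", per the EXPERIMENTAL note above.
func buildCustomRouter(cfg *config.Config, dhtParams *irouting.ExtraDHTParams, httpParams *irouting.ExtraHTTPParams) (routing.Routing, error) {
	return irouting.Parse(cfg.Routing.Routers, cfg.Routing.Methods, dhtParams, httpParams)
}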

View File

@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
@ -40,11 +39,6 @@ func TestAdd(t *testing.T) {
shortStringCidV1Sha512 = "bafkrgqbqt3gerhas23vuzrapkdeqf4vu2dwxp3srdj6hvg6nhsug2tgyn6mj3u23yx7utftq3i2ckw2fwdh5qmhid5qf3t35yvkc5e5ottlw6"
)
const (
cidV0Length = 34 // cidv0 sha2-256
cidV1Length = 36 // cidv1 sha2-256
)
t.Run("produced cid version: implicit default (CIDv0)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
@ -166,7 +160,7 @@ func TestAdd(t *testing.T) {
//
// UnixFSChunker=size-262144 (256KiB)
// Import.UnixFSFileMaxLinks=174
node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0") // legacy-cid-v0 for determinism across all params
node := harness.NewT(t).NewNode().Init("--profile=unixfs-v0-2015") // unixfs-v0-2015 for determinism across all params
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.UnixFSChunker = *config.NewOptionalString("size-262144") // 256 KiB chunks
cfg.Import.UnixFSFileMaxLinks = *config.NewOptionalInteger(174) // max 174 per level
@ -187,266 +181,243 @@ func TestAdd(t *testing.T) {
require.Equal(t, "QmbBftNHWmjSWKLC49dMVrfnY8pjrJYntiAXirFJ7oJrNk", cidStr)
})
t.Run("ipfs init --profile=legacy-cid-v0 sets config that produces legacy CIDv0", func(t *testing.T) {
// Profile-specific threshold tests are in cid_profiles_test.go (TestCIDProfiles).
// Tests here cover general ipfs add behavior not tied to specific profiles.
t.Run("ipfs add --hidden", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0")
node.StartDaemon()
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString)
require.Equal(t, shortStringCidV0, cidStr)
})
// Helper to create test directory with hidden file
setupTestDir := func(t *testing.T, node *harness.Node) string {
testDir, err := os.MkdirTemp(node.Dir, "hidden-test")
require.NoError(t, err)
require.NoError(t, os.WriteFile(filepath.Join(testDir, "visible.txt"), []byte("visible"), 0o644))
require.NoError(t, os.WriteFile(filepath.Join(testDir, ".hidden"), []byte("hidden"), 0o644))
return testDir
}
t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSChunker=size-262144 and UnixFSFileMaxLinks", func(t *testing.T) {
t.Parallel()
seed := "v0-seed"
profile := "--profile=legacy-cid-v0"
t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Run("default excludes hidden files", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Add 44544KiB file:
// 174 * 256KiB should fit in single DAG layer
cidStr := node.IPFSAddDeterministic("44544KiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 174, len(root.Links))
// expect same CID every time
require.Equal(t, "QmUbBALi174SnogsUzLpYbD4xPiBSFANF4iztWCsHbMKh2", cidStr)
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.Contains(t, lsOutput, "visible.txt")
require.NotContains(t, lsOutput, ".hidden")
})
t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Run("--hidden includes hidden files", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// add 256KiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("44800KiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "QmepeWtdmS1hHXx1oZXsPUv6bMrfRRKfZcoPPU4eEfjnbf", cidStr)
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "--hidden", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.Contains(t, lsOutput, "visible.txt")
require.Contains(t, lsOutput, ".hidden")
})
t.Run("-H includes hidden files", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "-H", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.Contains(t, lsOutput, "visible.txt")
require.Contains(t, lsOutput, ".hidden")
})
})
t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Run("ipfs add --empty-dirs", func(t *testing.T) {
t.Parallel()
seed := "hamt-legacy-cid-v0"
profile := "--profile=legacy-cid-v0"
t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
// Helper to create test directory with empty subdirectory
setupTestDir := func(t *testing.T, node *harness.Node) string {
testDir, err := os.MkdirTemp(node.Dir, "empty-dirs-test")
require.NoError(t, err)
require.NoError(t, os.Mkdir(filepath.Join(testDir, "empty-subdir"), 0o755))
require.NoError(t, os.WriteFile(filepath.Join(testDir, "file.txt"), []byte("content"), 0o644))
return testDir
}
t.Run("default includes empty directories", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV0Length, "255KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating a regular "basic" directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 903, len(root.Links))
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", testDir).Stdout.Trimmed()
require.Contains(t, node.IPFS("ls", cidStr).Stdout.Trimmed(), "empty-subdir")
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Run("--empty-dirs=true includes empty directories", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "--empty-dirs=true", testDir).Stdout.Trimmed()
require.Contains(t, node.IPFS("ls", cidStr).Stdout.Trimmed(), "empty-subdir")
})
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV0Length, "257KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
t.Run("--empty-dirs=false excludes empty directories", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Confirm this time the number of links is less than UnixFSHAMTDirectoryMaxFanout
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 252, len(root.Links))
testDir := setupTestDir(t, node)
cidStr := node.IPFS("add", "-r", "-Q", "--empty-dirs=false", testDir).Stdout.Trimmed()
lsOutput := node.IPFS("ls", cidStr).Stdout.Trimmed()
require.NotContains(t, lsOutput, "empty-subdir")
require.Contains(t, lsOutput, "file.txt")
})
})
t.Run("ipfs init --profile=test-cid-v1 produces CIDv1 with raw leaves", func(t *testing.T) {
t.Run("ipfs add symlink handling", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=test-cid-v1")
node.StartDaemon()
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString)
require.Equal(t, shortStringCidV1, cidStr) // raw leaf
})
// Helper to create test directory structure:
// testDir/
// target.txt (file with "target content")
// link.txt -> target.txt (symlink at top level)
// subdir/
// subsubdir/
// nested-target.txt (file with "nested content")
// nested-link.txt -> nested-target.txt (symlink in sub-sub directory)
setupTestDir := func(t *testing.T, node *harness.Node) string {
testDir, err := os.MkdirTemp(node.Dir, "deref-symlinks-test")
require.NoError(t, err)
t.Run("ipfs init --profile=test-cid-v1 applies UnixFSChunker=size-1048576", func(t *testing.T) {
t.Parallel()
seed := "v1-seed"
profile := "--profile=test-cid-v1"
// Top-level file and symlink
targetFile := filepath.Join(testDir, "target.txt")
require.NoError(t, os.WriteFile(targetFile, []byte("target content"), 0o644))
require.NoError(t, os.Symlink("target.txt", filepath.Join(testDir, "link.txt")))
t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) {
// Nested file and symlink in sub-sub directory
subsubdir := filepath.Join(testDir, "subdir", "subsubdir")
require.NoError(t, os.MkdirAll(subsubdir, 0o755))
nestedTarget := filepath.Join(subsubdir, "nested-target.txt")
require.NoError(t, os.WriteFile(nestedTarget, []byte("nested content"), 0o644))
require.NoError(t, os.Symlink("nested-target.txt", filepath.Join(subsubdir, "nested-link.txt")))
return testDir
}
t.Run("default preserves symlinks", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Add 174MiB file:
// 174 * 1MiB should fit in single layer
cidStr := node.IPFSAddDeterministic("174MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 174, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeigwduxcf2aawppv3isnfeshnimkyplvw3hthxjhr2bdeje4tdaicu", cidStr)
testDir := setupTestDir(t, node)
// Add directory with symlink (default: preserve)
dirCID := node.IPFS("add", "-r", "-Q", testDir).Stdout.Trimmed()
// Get and verify symlinks are preserved
outDir, err := os.MkdirTemp(node.Dir, "symlink-get-out")
require.NoError(t, err)
node.IPFS("get", "-o", outDir, dirCID)
// Check top-level symlink is preserved
linkPath := filepath.Join(outDir, "link.txt")
fi, err := os.Lstat(linkPath)
require.NoError(t, err)
require.True(t, fi.Mode()&os.ModeSymlink != 0, "link.txt should be a symlink")
target, err := os.Readlink(linkPath)
require.NoError(t, err)
require.Equal(t, "target.txt", target)
// Check nested symlink is preserved
nestedLinkPath := filepath.Join(outDir, "subdir", "subsubdir", "nested-link.txt")
fi, err = os.Lstat(nestedLinkPath)
require.NoError(t, err)
require.True(t, fi.Mode()&os.ModeSymlink != 0, "nested-link.txt should be a symlink")
})
t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) {
// --dereference-args is deprecated but still works for backwards compatibility.
// It only resolves symlinks passed as CLI arguments, NOT symlinks found
// during directory traversal. Use --dereference-symlinks instead.
t.Run("--dereference-args resolves CLI args only", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("175MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeidhd7lo2n2v7lta5yamob3xwhbxcczmmtmhquwhjesi35jntf7mpu", cidStr)
})
})
t.Run("ipfs init --profile=test-cid-v1 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-cid-v1"
profile := "--profile=test-cid-v1"
t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
testDir := setupTestDir(t, node)
symlinkPath := filepath.Join(testDir, "link.txt")
targetPath := filepath.Join(testDir, "target.txt")
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "255KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
symlinkCID := node.IPFS("add", "-Q", "--dereference-args", symlinkPath).Stdout.Trimmed()
targetCID := node.IPFS("add", "-Q", targetPath).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating a regular "basic" directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 897, len(root.Links))
// CIDs should match because --dereference-args resolves the symlink
require.Equal(t, targetCID, symlinkCID,
"--dereference-args should resolve CLI arg symlink to target content")
// Now add the directory recursively with --dereference-args
// Nested symlinks should NOT be resolved (only CLI args are resolved)
dirCID := node.IPFS("add", "-r", "-Q", "--dereference-args", testDir).Stdout.Trimmed()
outDir, err := os.MkdirTemp(node.Dir, "deref-args-out")
require.NoError(t, err)
node.IPFS("get", "-o", outDir, dirCID)
// Nested symlink should still be a symlink (not dereferenced)
nestedLinkPath := filepath.Join(outDir, "subdir", "subsubdir", "nested-link.txt")
fi, err := os.Lstat(nestedLinkPath)
require.NoError(t, err)
require.True(t, fi.Mode()&os.ModeSymlink != 0,
"--dereference-args should NOT resolve nested symlinks, only CLI args")
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
// --dereference-symlinks resolves ALL symlinks: both CLI arguments AND
// symlinks found during directory traversal. This is a superset of
// the deprecated --dereference-args behavior.
t.Run("--dereference-symlinks resolves all symlinks", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
testDir := setupTestDir(t, node)
symlinkPath := filepath.Join(testDir, "link.txt")
targetPath := filepath.Join(testDir, "target.txt")
symlinkCID := node.IPFS("add", "-Q", "--dereference-symlinks", symlinkPath).Stdout.Trimmed()
targetCID := node.IPFS("add", "-Q", targetPath).Stdout.Trimmed()
require.Equal(t, targetCID, symlinkCID,
"--dereference-symlinks should resolve CLI arg symlink (like --dereference-args)")
// Test 2: Nested symlinks in sub-sub directory are ALSO resolved
dirCID := node.IPFS("add", "-r", "-Q", "--dereference-symlinks", testDir).Stdout.Trimmed()
outDir, err := os.MkdirTemp(node.Dir, "deref-symlinks-out")
require.NoError(t, err)
node.IPFS("get", "-o", outDir, dirCID)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "257KiB", seed)
// Top-level symlink should be dereferenced to regular file
linkPath := filepath.Join(outDir, "link.txt")
fi, err := os.Lstat(linkPath)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm this time, the number of links is less than UnixFSHAMTDirectoryMaxFanout
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 252, len(root.Links))
})
})
t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSChunker=size-1048576 and UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
seed := "v1-seed-1024"
profile := "--profile=test-cid-v1-wide"
t.Run("under UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// Add 1024MiB file:
// 1024 * 1MiB should fit in a single layer
cidStr := node.IPFSAddDeterministic("1024MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 1024, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeiej5w63ir64oxgkr5htqmlerh5k2rqflurn2howimexrlkae64xru", cidStr)
})
t.Run("above UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("1025MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeieilp2qx24pe76hxrxe6bpef5meuxto3kj5dd6mhb5kplfeglskdm", cidStr)
})
})
t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-cid-v1"
profile := "--profile=test-cid-v1-wide"
t.Run("under UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.False(t, fi.Mode()&os.ModeSymlink != 0,
"link.txt should be dereferenced to regular file")
content, err := os.ReadFile(linkPath)
require.NoError(t, err)
require.Equal(t, "target content", string(content))
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "1023KiB", seed)
// Nested symlink in sub-sub directory should ALSO be dereferenced
nestedLinkPath := filepath.Join(outDir, "subdir", "subsubdir", "nested-link.txt")
fi, err = os.Lstat(nestedLinkPath)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating a regular "basic" directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 3599, len(root.Links))
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.False(t, fi.Mode()&os.ModeSymlink != 0,
"nested-link.txt should be dereferenced (--dereference-symlinks resolves ALL symlinks)")
nestedContent, err := os.ReadFile(nestedLinkPath)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "1025KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm this time, the number of links is less than UnixFSHAMTDirectoryMaxFanout
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 992, len(root.Links))
require.Equal(t, "nested content", string(nestedContent))
})
})
}
@ -627,30 +598,46 @@ func TestAddFastProvide(t *testing.T) {
})
}
// createDirectoryForHAMT aims to create enough files with long names for the directory block to be close to the UnixFSHAMTDirectorySizeThreshold.
// The calculation is based on boxo's HAMTShardingSize and sizeBelowThreshold which calculates ballpark size of the block
// by adding length of link names and the binary cid length.
// See https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L491
func createDirectoryForHAMT(dirPath string, cidLength int, unixfsNodeSizeTarget, seed string) error {
hamtThreshold, err := humanize.ParseBytes(unixfsNodeSizeTarget)
if err != nil {
return err
}
// createDirectoryForHAMTLinksEstimation creates a directory with the specified number
// of files for testing links-based size estimation (size = sum of nameLen + cidLen).
// Used by legacy profiles (unixfs-v0-2015).
//
// The lastNameLen parameter allows the last file to have a different name length,
// enabling exact +1 byte threshold tests.
func createDirectoryForHAMTLinksEstimation(dirPath string, numFiles, nameLen, lastNameLen int, seed string) error {
return createDeterministicFiles(dirPath, numFiles, nameLen, lastNameLen, seed)
}
// Calculate how many files with long filenames are needed to hit UnixFSHAMTDirectorySizeThreshold
nameLen := 255 // max that works across windows/macos/linux
// createDirectoryForHAMTBlockEstimation creates a directory with the specified number
// of files for testing block-based size estimation (LinkSerializedSize with protobuf overhead).
// Used by modern profiles (unixfs-v1-2025).
//
// The lastNameLen parameter allows the last file to have a different name length,
// enabling exact +1 byte threshold tests.
func createDirectoryForHAMTBlockEstimation(dirPath string, numFiles, nameLen, lastNameLen int, seed string) error {
return createDeterministicFiles(dirPath, numFiles, nameLen, lastNameLen, seed)
}
// createDeterministicFiles creates numFiles files with deterministic names.
// Files 0 to numFiles-2 have nameLen characters, and the last file has lastNameLen characters.
// Each file contains "x" (1 byte) for non-zero tsize in directory links.
func createDeterministicFiles(dirPath string, numFiles, nameLen, lastNameLen int, seed string) error {
alphabetLen := len(testutils.AlphabetEasy)
numFiles := int(hamtThreshold) / (nameLen + cidLength)
// Deterministic pseudo-random bytes for static CID
drand, err := testutils.DeterministicRandomReader(unixfsNodeSizeTarget, seed)
// Deterministic pseudo-random bytes for static filenames
drand, err := testutils.DeterministicRandomReader("1MiB", seed)
if err != nil {
return err
}
// Create necessary files in a single, flat directory
for i := 0; i < numFiles; i++ {
buf := make([]byte, nameLen)
// Use lastNameLen for the final file
currentNameLen := nameLen
if i == numFiles-1 {
currentNameLen = lastNameLen
}
buf := make([]byte, currentNameLen)
_, err := io.ReadFull(drand, buf)
if err != nil {
return err
@ -658,21 +645,17 @@ func createDirectoryForHAMT(dirPath string, cidLength int, unixfsNodeSizeTarget,
// Convert deterministic pseudo-random bytes to ASCII
var sb strings.Builder
for _, b := range buf {
// Map byte to printable ASCII range (33-126)
char := testutils.AlphabetEasy[int(b)%alphabetLen]
sb.WriteRune(char)
}
filename := sb.String()[:nameLen]
filename := sb.String()[:currentNameLen]
filePath := filepath.Join(dirPath, filename)
// Create empty file
f, err := os.Create(filePath)
if err != nil {
// Create file with 1-byte content for non-zero tsize
if err := os.WriteFile(filePath, []byte("x"), 0o644); err != nil {
return err
}
f.Close()
}
return nil
}

104
test/cli/api_file_test.go Normal file
View File

@ -0,0 +1,104 @@
package cli
import (
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/require"
)
// TestAddressFileReady verifies that when address files ($IPFS_PATH/api and
// $IPFS_PATH/gateway) are created, the corresponding HTTP servers are ready
// to accept connections immediately. This prevents race conditions for tools
// like systemd path units that start services when these files appear.
func TestAddressFileReady(t *testing.T) {
t.Parallel()
t.Run("api file", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
// Start daemon in background (don't use StartDaemon which waits for API)
res := node.Runner.MustRun(harness.RunRequest{
Path: node.IPFSBin,
Args: []string{"daemon"},
RunFunc: (*exec.Cmd).Start,
})
node.Daemon = res
defer node.StopDaemon()
// Poll for api file to appear
apiFile := filepath.Join(node.Dir, "api")
var fileExists bool
for i := 0; i < 100; i++ {
if _, err := os.Stat(apiFile); err == nil {
fileExists = true
break
}
time.Sleep(100 * time.Millisecond)
}
require.True(t, fileExists, "api file should be created")
// Read the api file to get the address
apiAddr, err := node.TryAPIAddr()
require.NoError(t, err)
// Extract IP and port from multiaddr
ip, err := apiAddr.ValueForProtocol(4) // P_IP4
require.NoError(t, err)
port, err := apiAddr.ValueForProtocol(6) // P_TCP
require.NoError(t, err)
// Immediately try to use the API - should work on first attempt
url := "http://" + ip + ":" + port + "/api/v0/id"
resp, err := http.Post(url, "", nil)
require.NoError(t, err, "RPC API should be ready immediately when api file exists")
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode)
})
t.Run("gateway file", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
node := h.NewNode().Init()
// Start daemon in background
res := node.Runner.MustRun(harness.RunRequest{
Path: node.IPFSBin,
Args: []string{"daemon"},
RunFunc: (*exec.Cmd).Start,
})
node.Daemon = res
defer node.StopDaemon()
// Poll for gateway file to appear
gatewayFile := filepath.Join(node.Dir, "gateway")
var fileExists bool
for i := 0; i < 100; i++ {
if _, err := os.Stat(gatewayFile); err == nil {
fileExists = true
break
}
time.Sleep(100 * time.Millisecond)
}
require.True(t, fileExists, "gateway file should be created")
// Read the gateway file to get the URL (already includes http:// prefix)
gatewayURL, err := os.ReadFile(gatewayFile)
require.NoError(t, err)
// Immediately try to use the Gateway - should work on first attempt
url := strings.TrimSpace(string(gatewayURL)) + "/ipfs/bafkqaaa" // empty file CID
resp, err := http.Get(url)
require.NoError(t, err, "Gateway should be ready immediately when gateway file exists")
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode)
})
}
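// For illustration, a consumer of this guarantee could be a systemd path unit
// that starts a dependent service the moment the api file appears. The unit
// names and repo location below are hypothetical examples, not shipped by Kubo:
//
//	# ipfs-api-ready.path
//	[Path]
//	PathExists=/home/ipfs/.ipfs/api
//	Unit=my-ipfs-consumer.service
//
// Because the api file is only written after the listener accepts connections,
// my-ipfs-consumer.service can call the RPC API immediately on start.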

View File

@ -39,7 +39,9 @@ func TestBackupBootstrapPeers(t *testing.T) {
// Start 1 and 2. 2 does not know anyone yet.
nodes[1].StartDaemon()
defer nodes[1].StopDaemon()
nodes[2].StartDaemon()
defer nodes[2].StopDaemon()
assert.Len(t, nodes[1].Peers(), 0)
assert.Len(t, nodes[2].Peers(), 0)
@ -51,6 +53,7 @@ func TestBackupBootstrapPeers(t *testing.T) {
// Start 0, wait a bit. Should connect to 1, and then discover 2 via the
// backup bootstrap peers.
nodes[0].StartDaemon()
defer nodes[0].StopDaemon()
time.Sleep(time.Millisecond * 500)
// Check if they're all connected.

View File

@ -22,7 +22,9 @@ func TestBitswapConfig(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -38,8 +40,10 @@ func TestBitswapConfig(t *testing.T) {
provider := h.NewNode().Init()
provider.SetIPFSConfig("Bitswap.ServerEnabled", false)
provider = provider.StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -70,8 +74,10 @@ func TestBitswapConfig(t *testing.T) {
requester := h.NewNode().Init()
requester.SetIPFSConfig("Bitswap.ServerEnabled", false)
requester.StartDaemon()
defer requester.StopDaemon()
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -91,8 +97,10 @@ func TestBitswapConfig(t *testing.T) {
cfg.HTTPRetrieval.Enabled = config.True
})
requester.StartDaemon()
defer requester.StopDaemon()
provider := h.NewNode().Init().StartDaemon()
defer provider.StopDaemon()
hash := provider.IPFSAddStr(string(testData))
requester.Connect(provider)
@ -126,7 +134,9 @@ func TestBitswapConfig(t *testing.T) {
cfg.HTTPRetrieval.Enabled = config.True
})
provider = provider.StartDaemon()
defer provider.StopDaemon()
requester := h.NewNode().Init().StartDaemon()
defer requester.StopDaemon()
requester.Connect(provider)
// read libp2p identify from remote peer, and print protocols

View File

@ -0,0 +1,724 @@
package cli
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
ft "github.com/ipfs/boxo/ipld/unixfs"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// cidProfileExpectations defines expected behaviors for a UnixFS import profile.
// This allows DRY testing of multiple profiles with the same test logic.
//
// Each profile is tested against threshold boundaries to verify:
// - CID format (version, hash function, raw leaves vs dag-pb wrapped)
// - File chunking (UnixFSChunker size threshold)
// - DAG structure (UnixFSFileMaxLinks rebalancing threshold)
// - Directory sharding (HAMTThreshold for flat vs HAMT directories)
type cidProfileExpectations struct {
// Profile identification
Name string // canonical profile name from IPIP-499
ProfileArgs []string // args to pass to ipfs init (empty for default behavior)
// CID format expectations
CIDVersion int // 0 or 1
HashFunc string // e.g., "sha2-256"
RawLeaves bool // true = raw codec for small files, false = dag-pb wrapped
// File chunking expectations (UnixFSChunker config)
ChunkSize int // chunk size in bytes (e.g., 262144 for 256KiB, 1048576 for 1MiB)
ChunkSizeHuman string // human-readable chunk size (e.g., "256KiB", "1MiB")
FileMaxLinks int // max links before DAG rebalancing (UnixFSFileMaxLinks config)
// HAMT directory sharding expectations (UnixFSHAMTDirectory* config).
// Threshold behavior: boxo converts to HAMT when size > HAMTThreshold (not >=).
// This means a directory exactly at the threshold stays as a basic (flat) directory.
HAMTFanout int // max links per HAMT shard bucket (256)
HAMTThreshold int // sharding threshold in bytes (262144 = 256 KiB)
HAMTSizeEstimation string // "block" (protobuf size) or "links" (legacy name+cid)
// Test vector parameters for threshold boundary tests.
// - DirBasic: size == threshold (stays basic)
// - DirHAMT: size > threshold (converts to HAMT)
// For block estimation, last filename length is adjusted to hit exact thresholds.
DirBasicNameLen int // filename length for basic directory (files 0 to N-2)
DirBasicLastNameLen int // filename length for last file (0 = same as DirBasicNameLen)
DirBasicFiles int // file count for basic directory (at exact threshold)
DirHAMTNameLen int // filename length for HAMT directory (files 0 to N-2)
DirHAMTLastNameLen int // filename length for last file (0 = same as DirHAMTNameLen)
DirHAMTFiles int // total file count for HAMT directory (over threshold)
// Expected deterministic CIDs for test vectors.
// These serve as regression tests to detect unintended changes in CID generation.
// SmallFileCID is the deterministic CID for "hello world" string.
// Tests basic CID format (version, codec, hash).
SmallFileCID string
// FileAtChunkSizeCID is the deterministic CID for a file exactly at chunk size.
// This file fits in a single block with no links:
// - v0-2015: dag-pb wrapped TFile node (CIDv0)
// - v1-2025: raw leaf block (CIDv1)
FileAtChunkSizeCID string
// FileOverChunkSizeCID is the deterministic CID for a file 1 byte over chunk size.
// This file requires 2 chunks, producing a root dag-pb node with 2 links:
// - v0-2015: links point to dag-pb wrapped TFile leaf nodes
// - v1-2025: links point to raw leaf blocks
FileOverChunkSizeCID string
// FileAtMaxLinksCID is the deterministic CID for a file at UnixFSFileMaxLinks threshold.
// File size = maxLinks * chunkSize, producing a single-layer DAG with exactly maxLinks children.
FileAtMaxLinksCID string
// FileOverMaxLinksCID is the deterministic CID for a file 1 byte over max links threshold.
// The +1 byte requires an additional chunk, forcing DAG rebalancing to 2 layers.
FileOverMaxLinksCID string
// DirBasicCID is the deterministic CID for a directory exactly at HAMTThreshold.
// With > comparison (not >=), directory at exact threshold stays as basic (flat) directory.
DirBasicCID string
// DirHAMTCID is the deterministic CID for a directory 1 byte over HAMTThreshold.
// Crossing the threshold converts the directory to a HAMT sharded structure.
DirHAMTCID string
}
// unixfsV02015 is the legacy profile for backward-compatible CID generation.
// Alias: legacy-cid-v0
var unixfsV02015 = cidProfileExpectations{
Name: "unixfs-v0-2015",
ProfileArgs: []string{"--profile=unixfs-v0-2015"},
CIDVersion: 0,
HashFunc: "sha2-256",
RawLeaves: false,
ChunkSize: 262144, // 256 KiB
ChunkSizeHuman: "256KiB",
FileMaxLinks: 174,
HAMTFanout: 256,
HAMTThreshold: 262144, // 256 KiB
HAMTSizeEstimation: "links",
DirBasicNameLen: 30, // 4096 * (30 + 34) = 262144 exactly at threshold
DirBasicFiles: 4096, // 4096 * 64 = 262144 (stays basic with >)
DirHAMTNameLen: 31, // 4033 * (31 + 34) = 262145 exactly +1 over threshold
DirHAMTLastNameLen: 0, // 0 = same as DirHAMTNameLen (uniform filenames)
DirHAMTFiles: 4033, // 4033 * 65 = 262145 (becomes HAMT)
SmallFileCID: "Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD", // "hello world" dag-pb wrapped
FileAtChunkSizeCID: "QmWmRj3dFDZdb6ABvbmKhEL6TmPbAfBZ1t5BxsEyJrcZhE", // 262144 bytes with seed "chunk-v0-seed"
FileOverChunkSizeCID: "QmYyLxtzZyW22zpoVAtKANLRHpDjZtNeDjQdJrcQNWoRkJ", // 262145 bytes with seed "chunk-v0-seed"
FileAtMaxLinksCID: "QmUbBALi174SnogsUzLpYbD4xPiBSFANF4iztWCsHbMKh2", // 174*256KiB bytes with seed "v0-seed"
FileOverMaxLinksCID: "QmV81WL765sC8DXsRhE5fJv2rwhS4icHRaf3J9Zk5FdRnW", // 174*256KiB+1 bytes with seed "v0-seed"
DirBasicCID: "QmX5GtRk3TSSEHtdrykgqm4eqMEn3n2XhfkFAis5fjyZmN", // 4096 files at threshold
DirHAMTCID: "QmeMiJzmhpJAUgynAcxTQYek5PPKgdv3qEvFsdV3XpVnvP", // 4033 files +1 over threshold
}
// unixfsV12025 is the recommended profile for cross-implementation CID determinism.
var unixfsV12025 = cidProfileExpectations{
Name: "unixfs-v1-2025",
ProfileArgs: []string{"--profile=unixfs-v1-2025"},
CIDVersion: 1,
HashFunc: "sha2-256",
RawLeaves: true,
ChunkSize: 1048576, // 1 MiB
ChunkSizeHuman: "1MiB",
FileMaxLinks: 1024,
HAMTFanout: 256,
HAMTThreshold: 262144, // 256 KiB
HAMTSizeEstimation: "block",
// Block size = numFiles * linkSize + 4 bytes overhead
// LinkSerializedSize(11, 36, 1) = 55, LinkSerializedSize(21, 36, 1) = 65, LinkSerializedSize(22, 36, 1) = 66
DirBasicNameLen: 11, // 4765 files * 55 bytes
DirBasicLastNameLen: 21, // last file: 65 bytes; total: 4765*55 + 65 + 4 = 262144 (at threshold)
DirBasicFiles: 4766, // stays basic with > comparison
DirHAMTNameLen: 11, // 4765 files * 55 bytes
DirHAMTLastNameLen: 22, // last file: 66 bytes; total: 4765*55 + 66 + 4 = 262145 (+1 over threshold)
DirHAMTFiles: 4766, // becomes HAMT
SmallFileCID: "bafkreifzjut3te2nhyekklss27nh3k72ysco7y32koao5eei66wof36n5e", // "hello world" raw leaf
FileAtChunkSizeCID: "bafkreiacndfy443ter6qr2tmbbdhadvxxheowwf75s6zehscklu6ezxmta", // 1048576 bytes with seed "chunk-v1-seed"
FileOverChunkSizeCID: "bafybeigmix7t42i6jacydtquhet7srwvgpizfg7gjbq7627d35mjomtu64", // 1048577 bytes with seed "chunk-v1-seed"
FileAtMaxLinksCID: "bafybeihmf37wcuvtx4hpu7he5zl5qaf2ineo2lqlfrapokkm5zzw7zyhvm", // 1024*1MiB bytes with seed "v1-2025-seed"
FileOverMaxLinksCID: "bafybeibdsi225ugbkmpbdohnxioyab6jsqrmkts3twhpvfnzp77xtzpyhe", // 1024*1MiB+1 bytes with seed "v1-2025-seed"
DirBasicCID: "bafybeic3h7rwruealwxkacabdy45jivq2crwz6bufb5ljwupn36gicplx4", // 4766 files at 262144 bytes (threshold)
DirHAMTCID: "bafybeiegvuterwurhdtkikfhbxcldohmxp566vpjdofhzmnhv6o4freidu", // 4766 files at 262145 bytes (+1 over)
}
// defaultProfile points to the profile that matches Kubo's implicit default behavior.
// Today this is unixfs-v0-2015. When Kubo changes defaults, update this pointer.
var defaultProfile = unixfsV02015
const (
cidV0Length = 34 // CIDv0 sha2-256
cidV1Length = 36 // CIDv1 sha2-256
)
// TestCIDProfiles generates deterministic test vectors for CID profile verification.
// Set CID_PROFILES_CAR_OUTPUT environment variable to export CAR files.
// Example: CID_PROFILES_CAR_OUTPUT=/tmp/cid-profiles go test -run TestCIDProfiles -v
func TestCIDProfiles(t *testing.T) {
t.Parallel()
carOutputDir := os.Getenv("CID_PROFILES_CAR_OUTPUT")
exportCARs := carOutputDir != ""
if exportCARs {
if err := os.MkdirAll(carOutputDir, 0o755); err != nil {
t.Fatalf("failed to create CAR output directory: %v", err)
}
t.Logf("CAR export enabled, writing to: %s", carOutputDir)
}
// Test both IPIP-499 profiles
for _, profile := range []cidProfileExpectations{unixfsV02015, unixfsV12025} {
t.Run(profile.Name, func(t *testing.T) {
t.Parallel()
runProfileTests(t, profile, carOutputDir, exportCARs)
})
}
// Test default behavior (no profile specified)
t.Run("default", func(t *testing.T) {
t.Parallel()
// Default behavior should match defaultProfile (currently unixfs-v0-2015)
defaultExp := defaultProfile
defaultExp.Name = "default"
defaultExp.ProfileArgs = nil // no profile args = default behavior
runProfileTests(t, defaultExp, carOutputDir, exportCARs)
})
}
// runProfileTests runs all test vectors for a given profile.
// Tests verify threshold behaviors for:
// - Small files (CID format verification)
// - UnixFSChunker threshold (single block vs multi-block)
// - UnixFSFileMaxLinks threshold (single-layer vs rebalanced DAG)
// - HAMTThreshold (basic flat directory vs HAMT sharded)
func runProfileTests(t *testing.T, exp cidProfileExpectations, carOutputDir string, exportCARs bool) {
cidLen := cidV0Length
if exp.CIDVersion == 1 {
cidLen = cidV1Length
}
// Test: small file produces correct CID format
// Verifies the profile sets the expected CID version, hash function, and leaf encoding.
t.Run("small file produces correct CID format", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// Use "hello world" for determinism
cidStr := node.IPFSAddStr("hello world")
// Verify CID version (v0 starts with "Qm", v1 with "b")
verifyCIDVersion(t, node, cidStr, exp.CIDVersion)
// Verify hash function (sha2-256 for both profiles)
verifyHashFunction(t, node, cidStr, exp.HashFunc)
// Verify raw leaves vs dag-pb wrapped
// - v0-2015: dag-pb codec (wrapped)
// - v1-2025: raw codec (raw leaves)
verifyRawLeaves(t, node, cidStr, exp.RawLeaves)
// Verify deterministic CID matches expected value
if exp.SmallFileCID != "" {
require.Equal(t, exp.SmallFileCID, cidStr, "expected deterministic CID for small file")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_small-file.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
// Test: file at UnixFSChunker threshold (single block)
// A file exactly at chunk size fits in one block with no links.
// - v0-2015 (256KiB): produces dag-pb wrapped TFile node
// - v1-2025 (1MiB): produces raw leaf block
t.Run("file at UnixFSChunker threshold (single block)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// File exactly at chunk size = single block (no links)
seed := chunkSeedForProfile(exp)
cidStr := node.IPFSAddDeterministicBytes(int64(exp.ChunkSize), seed)
// Verify block structure based on raw leaves setting
if exp.RawLeaves {
// v1-2025: single block is a raw leaf (no dag-pb structure)
codec := node.IPFS("cid", "format", "-f", "%c", cidStr).Stdout.Trimmed()
require.Equal(t, "raw", codec, "single block file is raw leaf")
} else {
// v0-2015: single block is a dag-pb node with no links (TFile type)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 0, len(root.Links), "single block file has no links")
fsType, err := node.UnixFSDataType(cidStr)
require.NoError(t, err)
require.Equal(t, ft.TFile, fsType, "single block file is dag-pb wrapped (TFile)")
}
verifyHashFunction(t, node, cidStr, exp.HashFunc)
if exp.FileAtChunkSizeCID != "" {
require.Equal(t, exp.FileAtChunkSizeCID, cidStr, "expected deterministic CID for file at chunk size")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_file-at-chunk-size.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
// Test: file 1 byte over UnixFSChunker threshold (2 blocks)
// A file 1 byte over chunk size requires 2 chunks.
// Root is a dag-pb node with 2 links. Leaf encoding depends on profile:
// - v0-2015: leaf blocks are dag-pb wrapped TFile nodes
// - v1-2025: leaf blocks are raw codec blocks
t.Run("file 1 byte over UnixFSChunker threshold (2 blocks)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// File +1 byte over chunk size = 2 blocks
seed := chunkSeedForProfile(exp)
cidStr := node.IPFSAddDeterministicBytes(int64(exp.ChunkSize)+1, seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links), "file over chunk size has 2 links")
// Verify leaf block encoding
for _, link := range root.Links {
if exp.RawLeaves {
// v1-2025: leaves are raw blocks
leafCodec := node.IPFS("cid", "format", "-f", "%c", link.Hash.Slash).Stdout.Trimmed()
require.Equal(t, "raw", leafCodec, "leaf blocks are raw, not dag-pb")
} else {
// v0-2015: leaves are dag-pb wrapped (TFile type)
leafType, err := node.UnixFSDataType(link.Hash.Slash)
require.NoError(t, err)
require.Equal(t, ft.TFile, leafType, "leaf blocks are dag-pb wrapped (TFile)")
}
}
verifyHashFunction(t, node, cidStr, exp.HashFunc)
if exp.FileOverChunkSizeCID != "" {
require.Equal(t, exp.FileOverChunkSizeCID, cidStr, "expected deterministic CID for file over chunk size")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_file-over-chunk-size.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
// Test: file at UnixFSFileMaxLinks threshold (single layer)
// A file of exactly maxLinks * chunkSize bytes fits in a single DAG layer.
// - v0-2015: 174 links (174 * 256KiB = ~44.6MiB)
// - v1-2025: 1024 links (1024 * 1MiB = 1GiB)
t.Run("file at UnixFSFileMaxLinks threshold (single layer)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// File size = maxLinks * chunkSize (exactly at threshold)
fileSize := fileAtMaxLinksBytes(exp)
seed := seedForProfile(exp)
cidStr := node.IPFSAddDeterministicBytes(fileSize, seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, exp.FileMaxLinks, len(root.Links),
"expected exactly %d links at max", exp.FileMaxLinks)
verifyHashFunction(t, node, cidStr, exp.HashFunc)
if exp.FileAtMaxLinksCID != "" {
require.Equal(t, exp.FileAtMaxLinksCID, cidStr, "expected deterministic CID for file at max links")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_file-at-max-links.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
// Test: file 1 byte over UnixFSFileMaxLinks threshold (rebalanced DAG)
// Adding 1 byte requires an additional chunk, exceeding maxLinks.
// This triggers DAG rebalancing: chunks are grouped into intermediate nodes,
// producing a 2-layer DAG with 2 links at the root.
t.Run("file 1 byte over UnixFSFileMaxLinks threshold (rebalanced DAG)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// +1 byte over max links threshold triggers DAG rebalancing
fileSize := fileOverMaxLinksBytes(exp)
seed := seedForProfile(exp)
cidStr := node.IPFSAddDeterministicBytes(fileSize, seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links), "expected 2 links after DAG rebalancing")
verifyHashFunction(t, node, cidStr, exp.HashFunc)
if exp.FileOverMaxLinksCID != "" {
require.Equal(t, exp.FileOverMaxLinksCID, cidStr, "expected deterministic CID for rebalanced file")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_file-over-max-links.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
// Test: directory at HAMTThreshold (basic flat dir)
// A directory exactly at HAMTThreshold stays as a basic (flat) UnixFS directory.
// Threshold uses > comparison (not >=), so size == threshold stays basic.
// Size estimation method depends on profile:
// - v0-2015 "links": size = sum(nameLen + cidLen)
// - v1-2025 "block": size = serialized protobuf block size
t.Run("directory at HAMTThreshold (basic flat dir)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// Use consistent seed for deterministic CIDs
seed := hamtSeedForProfile(exp)
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create basic (flat) directory exactly at threshold
basicLastNameLen := exp.DirBasicLastNameLen
if basicLastNameLen == 0 {
basicLastNameLen = exp.DirBasicNameLen
}
if exp.HAMTSizeEstimation == "block" {
err = createDirectoryForHAMTBlockEstimation(randDir, exp.DirBasicFiles, exp.DirBasicNameLen, basicLastNameLen, seed)
} else {
err = createDirectoryForHAMTLinksEstimation(randDir, exp.DirBasicFiles, exp.DirBasicNameLen, basicLastNameLen, seed)
}
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Verify UnixFS type is TDirectory (1), not THAMTShard (5)
fsType, err := node.UnixFSDataType(cidStr)
require.NoError(t, err)
require.Equal(t, ft.TDirectory, fsType, "expected basic directory (type=1) at exact threshold")
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, exp.DirBasicFiles, len(root.Links),
"expected basic directory with %d links", exp.DirBasicFiles)
verifyHashFunction(t, node, cidStr, exp.HashFunc)
// Verify size is exactly at threshold
if exp.HAMTSizeEstimation == "block" {
blockSize := getBlockSize(t, node, cidStr)
require.Equal(t, exp.HAMTThreshold, blockSize,
"expected basic directory block size to be exactly at threshold (%d), got %d", exp.HAMTThreshold, blockSize)
}
if exp.HAMTSizeEstimation == "links" {
linksSize := 0
for _, link := range root.Links {
linksSize += len(link.Name) + cidLen
}
require.Equal(t, exp.HAMTThreshold, linksSize,
"expected basic directory links size to be exactly at threshold (%d), got %d", exp.HAMTThreshold, linksSize)
}
if exp.DirBasicCID != "" {
require.Equal(t, exp.DirBasicCID, cidStr, "expected deterministic CID for basic directory")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_dir-basic.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s (%d files) -> %s", cidStr, exp.DirBasicFiles, carPath)
}
})
// Test: directory 1 byte over HAMTThreshold (HAMT sharded)
// A directory 1 byte over HAMTThreshold is converted to a HAMT sharded structure.
// HAMT distributes entries across buckets using consistent hashing.
// Root has at most HAMTFanout links (256), with entries distributed across buckets.
t.Run("directory 1 byte over HAMTThreshold (HAMT sharded)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(exp.ProfileArgs...)
node.StartDaemon()
defer node.StopDaemon()
// Use consistent seed for deterministic CIDs
seed := hamtSeedForProfile(exp)
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create HAMT (sharded) directory exactly +1 byte over threshold
lastNameLen := exp.DirHAMTLastNameLen
if lastNameLen == 0 {
lastNameLen = exp.DirHAMTNameLen
}
if exp.HAMTSizeEstimation == "block" {
err = createDirectoryForHAMTBlockEstimation(randDir, exp.DirHAMTFiles, exp.DirHAMTNameLen, lastNameLen, seed)
} else {
err = createDirectoryForHAMTLinksEstimation(randDir, exp.DirHAMTFiles, exp.DirHAMTNameLen, lastNameLen, seed)
}
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Verify UnixFS type is THAMTShard (5), not TDirectory (1)
fsType, err := node.UnixFSDataType(cidStr)
require.NoError(t, err)
require.Equal(t, ft.THAMTShard, fsType, "expected HAMT directory (type=5) when over threshold")
// HAMT root has at most fanout links (actual count depends on hash distribution)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.LessOrEqual(t, len(root.Links), exp.HAMTFanout,
"expected HAMT directory root to have <= %d links", exp.HAMTFanout)
verifyHashFunction(t, node, cidStr, exp.HashFunc)
if exp.DirHAMTCID != "" {
require.Equal(t, exp.DirHAMTCID, cidStr, "expected deterministic CID for HAMT directory")
}
if exportCARs {
carPath := filepath.Join(carOutputDir, exp.Name+"_dir-hamt.car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s (%d files, HAMT root links: %d) -> %s",
cidStr, exp.DirHAMTFiles, len(root.Links), carPath)
}
})
}
// verifyCIDVersion checks that the CID has the expected version.
func verifyCIDVersion(t *testing.T, _ *harness.Node, cidStr string, expectedVersion int) {
t.Helper()
if expectedVersion == 0 {
require.True(t, strings.HasPrefix(cidStr, "Qm"),
"expected CIDv0 (starts with Qm), got: %s", cidStr)
} else {
require.True(t, strings.HasPrefix(cidStr, "b"),
"expected CIDv1 (base32, starts with b), got: %s", cidStr)
}
}
// verifyHashFunction checks that the CID uses the expected hash function.
func verifyHashFunction(t *testing.T, node *harness.Node, cidStr, expectedHash string) {
t.Helper()
// Use ipfs cid format to get hash function info
// Format string %h gives the hash function name
res := node.IPFS("cid", "format", "-f", "%h", cidStr)
hashFunc := strings.TrimSpace(res.Stdout.String())
require.Equal(t, expectedHash, hashFunc,
"expected hash function %s, got %s for CID %s", expectedHash, hashFunc, cidStr)
}
// verifyRawLeaves checks whether the CID represents a raw leaf or dag-pb wrapped block.
// For CIDv1: raw leaves have codec 0x55 (raw), wrapped have codec 0x70 (dag-pb).
// For CIDv0: always dag-pb (no raw leaves possible).
func verifyRawLeaves(t *testing.T, node *harness.Node, cidStr string, expectRaw bool) {
t.Helper()
// Use ipfs cid format to get codec info
// Format string %c gives the codec name
res := node.IPFS("cid", "format", "-f", "%c", cidStr)
codec := strings.TrimSpace(res.Stdout.String())
if expectRaw {
require.Equal(t, "raw", codec,
"expected raw codec for raw leaves, got %s for CID %s", codec, cidStr)
} else {
require.Equal(t, "dag-pb", codec,
"expected dag-pb codec for wrapped leaves, got %s for CID %s", codec, cidStr)
}
}
// getBlockSize returns the size of a block in bytes using ipfs block stat.
func getBlockSize(t *testing.T, node *harness.Node, cidStr string) int {
t.Helper()
res := node.IPFS("block", "stat", "--enc=json", cidStr)
var stat struct {
Size int `json:"Size"`
}
require.NoError(t, json.Unmarshal(res.Stdout.Bytes(), &stat))
return stat.Size
}
// fileAtMaxLinksBytes returns the file size in bytes that produces exactly FileMaxLinks chunks.
func fileAtMaxLinksBytes(exp cidProfileExpectations) int64 {
return int64(exp.FileMaxLinks) * int64(exp.ChunkSize)
}
// fileOverMaxLinksBytes returns the file size in bytes that triggers DAG rebalancing (+1 byte over max links threshold).
func fileOverMaxLinksBytes(exp cidProfileExpectations) int64 {
return int64(exp.FileMaxLinks)*int64(exp.ChunkSize) + 1
}
// seedForProfile returns the deterministic seed used in add_test.go for file max links tests.
func seedForProfile(exp cidProfileExpectations) string {
switch exp.Name {
case "unixfs-v0-2015", "default":
return "v0-seed"
case "unixfs-v1-2025":
return "v1-2025-seed"
default:
return exp.Name + "-seed"
}
}
// chunkSeedForProfile returns the deterministic seed for chunk threshold tests.
func chunkSeedForProfile(exp cidProfileExpectations) string {
switch exp.Name {
case "unixfs-v0-2015", "default":
return "chunk-v0-seed"
case "unixfs-v1-2025":
return "chunk-v1-seed"
default:
return "chunk-" + exp.Name + "-seed"
}
}
// hamtSeedForProfile returns the deterministic seed for HAMT directory tests.
// Uses the same seed for both under/at threshold tests to ensure consistency.
func hamtSeedForProfile(exp cidProfileExpectations) string {
switch exp.Name {
case "unixfs-v0-2015", "default":
return "hamt-unixfs-v0-2015"
case "unixfs-v1-2025":
return "hamt-unixfs-v1-2025"
default:
return "hamt-" + exp.Name
}
}
// TestDefaultMatchesExpectedProfile verifies that default ipfs add behavior
// matches the expected profile (currently unixfs-v0-2015).
func TestDefaultMatchesExpectedProfile(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
node.StartDaemon()
defer node.StopDaemon()
// Small file test
cidDefault := node.IPFSAddStr("x")
// Same file with explicit profile
nodeWithProfile := harness.NewT(t).NewNode().Init(defaultProfile.ProfileArgs...)
nodeWithProfile.StartDaemon()
defer nodeWithProfile.StopDaemon()
cidWithProfile := nodeWithProfile.IPFSAddStr("x")
require.Equal(t, cidWithProfile, cidDefault,
"default behavior should match %s profile", defaultProfile.Name)
}
// TestProtobufHelpers verifies the protobuf size calculation helpers.
func TestProtobufHelpers(t *testing.T) {
t.Parallel()
t.Run("VarintLen", func(t *testing.T) {
// Varint encoding: 7 bits per byte, MSB indicates continuation
cases := []struct {
value uint64
expected int
}{
{0, 1},
{127, 1}, // 0x7F - max 1-byte varint
{128, 2}, // 0x80 - min 2-byte varint
{16383, 2}, // 0x3FFF - max 2-byte varint
{16384, 3}, // 0x4000 - min 3-byte varint
{2097151, 3}, // 0x1FFFFF - max 3-byte varint
{2097152, 4}, // 0x200000 - min 4-byte varint
{268435455, 4}, // 0xFFFFFFF - max 4-byte varint
{268435456, 5}, // 0x10000000 - min 5-byte varint
{34359738367, 5}, // 0x7FFFFFFFF - max 5-byte varint
}
for _, tc := range cases {
got := testutils.VarintLen(tc.value)
require.Equal(t, tc.expected, got, "VarintLen(%d)", tc.value)
}
})
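// For reference, the expected lengths above follow from the varint encoding
// described in the comment: 7 payload bits per byte, high bit set on all but
// the last byte. A minimal sketch of that calculation (an illustration only,
// not necessarily how testutils.VarintLen is implemented):
//
//	func varintLen(v uint64) int {
//		n := 1
//		for v >= 0x80 {
//			v >>= 7
//			n++
//		}
//		return n
//	}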
t.Run("LinkSerializedSize", func(t *testing.T) {
// Test typical cases for directory links
cases := []struct {
nameLen int
cidLen int
tsize uint64
expected int
}{
// 255-char name, CIDv0 (34 bytes), tsize=0
// Inner: 1+1+34 + 1+2+255 + 1+1 = 296
// Outer: 1 + 2 + 296 = 299
{255, 34, 0, 299},
// 255-char name, CIDv1 (36 bytes), tsize=0
// Inner: 1+1+36 + 1+2+255 + 1+1 = 298
// Outer: 1 + 2 + 298 = 301
{255, 36, 0, 301},
// Short name (10 chars), CIDv1, tsize=0
// Inner: 1+1+36 + 1+1+10 + 1+1 = 52
// Outer: 1 + 1 + 52 = 54
{10, 36, 0, 54},
// 255-char name, CIDv1, large tsize
// Inner: 1+1+36 + 1+2+255 + 1+5 = 302 (tsize uses 5-byte varint)
// Outer: 1 + 2 + 302 = 305
{255, 36, 34359738367, 305},
}
for _, tc := range cases {
got := testutils.LinkSerializedSize(tc.nameLen, tc.cidLen, tc.tsize)
require.Equal(t, tc.expected, got, "LinkSerializedSize(%d, %d, %d)", tc.nameLen, tc.cidLen, tc.tsize)
}
})
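// The expected sizes above can be reproduced from the dag-pb PBLink wire
// format: each field is a 1-byte tag, a varint length (or varint value for
// Tsize), and the payload, and the whole link is wrapped once more as a
// length-delimited Links field of the PBNode. A sketch of that arithmetic
// (an illustration of the test vectors, not necessarily how
// testutils.LinkSerializedSize is implemented):
//
//	inner := (1 + varintLen(uint64(cidLen)) + cidLen) + // Hash (bytes)
//		(1 + varintLen(uint64(nameLen)) + nameLen) + // Name (string)
//		(1 + varintLen(tsize)) // Tsize (uvarint)
//	total := 1 + varintLen(uint64(inner)) + inner // Links entry in PBNode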
t.Run("EstimateFilesForBlockThreshold", func(t *testing.T) {
threshold := 262144
nameLen := 255
cidLen := 36
var tsize uint64 = 0
numFiles := testutils.EstimateFilesForBlockThreshold(threshold, nameLen, cidLen, tsize)
require.Equal(t, 870, numFiles, "expected 870 files for threshold 262144")
numFilesUnder := testutils.EstimateFilesForBlockThreshold(threshold-1, nameLen, cidLen, tsize)
require.Equal(t, 870, numFilesUnder, "expected 870 files for threshold 262143")
numFilesOver := testutils.EstimateFilesForBlockThreshold(262185, nameLen, cidLen, tsize)
require.Equal(t, 871, numFilesOver, "expected 871 files for threshold 262185")
})
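// The three expectations above are consistent with the block-size model used
// by the "block" estimation profiles: size = numFiles*linkSize + 4 bytes of
// PBNode overhead (see the unixfs-v1-2025 comments earlier in this file), so
// the largest uniform file count that fits is, as a sketch:
//
//	linkSize := testutils.LinkSerializedSize(nameLen, cidLen, tsize) // 301 here
//	numFiles := (threshold - 4) / linkSize                           // 262140/301 = 870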
}

View File

@ -76,6 +76,7 @@ func TestContentBlocking(t *testing.T) {
// Start daemon, it should pick up denylist from $IPFS_PATH/denylists/test.deny
node.StartDaemon() // we need online mode for GatewayOverLibp2p tests
t.Cleanup(func() { node.StopDaemon() })
client := node.GatewayClient()
// First, confirm gateway works

147
test/cli/dag_layout_test.go Normal file
View File

@ -0,0 +1,147 @@
package cli
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/require"
)
// TestBalancedDAGLayout verifies that kubo uses the "balanced" DAG layout
// (all leaves at same depth) rather than "balanced-packed" (varying leaf depths).
//
// DAG layout differences across implementations:
//
// - balanced: kubo, helia (all leaves at same depth, uniform traversal distance)
// - balanced-packed: singularity (trailing leaves may be at different depths)
// - trickle: kubo --trickle (varying depths, optimized for append-only/streaming)
//
// kubo does not implement balanced-packed. The trickle layout also produces
// non-uniform leaf depths but with different trade-offs: trickle is optimized
// for append-only and streaming reads (no seeking), while balanced-packed
// minimizes node count.
//
// IPIP-499 documents the balanced vs balanced-packed distinction. Files larger
// than dag_width × chunk_size will have different CIDs between implementations
// using different layouts.
//
// Set DAG_LAYOUT_CAR_OUTPUT environment variable to export CAR files.
// Example: DAG_LAYOUT_CAR_OUTPUT=/tmp/dag-layout go test -run TestBalancedDAGLayout -v
func TestBalancedDAGLayout(t *testing.T) {
t.Parallel()
carOutputDir := os.Getenv("DAG_LAYOUT_CAR_OUTPUT")
exportCARs := carOutputDir != ""
if exportCARs {
if err := os.MkdirAll(carOutputDir, 0755); err != nil {
t.Fatalf("failed to create CAR output directory: %v", err)
}
t.Logf("CAR export enabled, writing to: %s", carOutputDir)
}
t.Run("balanced layout has uniform leaf depth", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
// Create file that triggers multi-level DAG.
// For default v0: 175 chunks × 256KiB = ~44.8 MiB (just over 174 max links)
// This creates a 2-level DAG where balanced layout ensures uniform depth.
fileSize := "45MiB"
seed := "balanced-test"
cidStr := node.IPFSAddDeterministic(fileSize, seed)
// Collect leaf depths by walking DAG
depths := collectLeafDepths(t, node, cidStr, 0)
// All leaves must be at same depth for balanced layout
require.NotEmpty(t, depths, "expected at least one leaf node")
firstDepth := depths[0]
for i, d := range depths {
require.Equal(t, firstDepth, d,
"leaf %d at depth %d, expected %d (balanced layout requires uniform leaf depth)",
i, d, firstDepth)
}
t.Logf("verified %d leaves all at depth %d (CID: %s)", len(depths), firstDepth, cidStr)
if exportCARs {
carPath := filepath.Join(carOutputDir, "balanced_"+fileSize+".car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
t.Run("trickle layout has varying leaf depth", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
fileSize := "45MiB"
seed := "trickle-test"
// Add with trickle layout (--trickle flag).
// Trickle produces non-uniform leaf depths, optimized for append-only
// and streaming reads (no seeking). This subtest validates the test
// logic by confirming we can detect varying depths.
cidStr := node.IPFSAddDeterministic(fileSize, seed, "--trickle")
depths := collectLeafDepths(t, node, cidStr, 0)
// Trickle layout should have varying depths
require.NotEmpty(t, depths, "expected at least one leaf node")
minDepth, maxDepth := depths[0], depths[0]
for _, d := range depths {
if d < minDepth {
minDepth = d
}
if d > maxDepth {
maxDepth = d
}
}
require.NotEqual(t, minDepth, maxDepth,
"trickle layout should have varying leaf depths, got uniform depth %d", minDepth)
t.Logf("verified %d leaves with depths ranging from %d to %d (CID: %s)", len(depths), minDepth, maxDepth, cidStr)
if exportCARs {
carPath := filepath.Join(carOutputDir, "trickle_"+fileSize+".car")
require.NoError(t, node.IPFSDagExport(cidStr, carPath))
t.Logf("exported: %s -> %s", cidStr, carPath)
}
})
}
// collectLeafDepths recursively walks DAG and returns depth of each leaf node.
// A node is a leaf if it's a raw block or a dag-pb node with no links.
func collectLeafDepths(t *testing.T, node *harness.Node, cid string, depth int) []int {
t.Helper()
// Check codec to see if this is a raw leaf
res := node.IPFS("cid", "format", "-f", "%c", cid)
codec := strings.TrimSpace(res.Stdout.String())
if codec == "raw" {
// Raw blocks are always leaves
return []int{depth}
}
// Try to inspect as dag-pb node
pbNode, err := node.InspectPBNode(cid)
if err != nil {
// Can't parse as dag-pb, treat as leaf
return []int{depth}
}
// No links = leaf node
if len(pbNode.Links) == 0 {
return []int{depth}
}
// Recurse into children
var depths []int
for _, link := range pbNode.Links {
childDepths := collectLeafDepths(t, node, link.Hash.Slash, depth+1)
depths = append(depths, childDepths...)
}
return depths
}

View File

@ -47,6 +47,8 @@ func TestDag(t *testing.T) {
t.Run("ipfs dag stat --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Import fixture
r, err := os.Open(fixtureFile)
assert.Nil(t, err)
@ -91,6 +93,7 @@ func TestDag(t *testing.T) {
t.Run("ipfs dag stat", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
r, err := os.Open(fixtureFile)
assert.NoError(t, err)
defer r.Close()

View File

@ -60,6 +60,10 @@ func TestRoutingV1Proxy(t *testing.T) {
})
nodes[2].StartDaemon()
t.Cleanup(func() {
nodes.StopDaemons()
})
// Connect them.
nodes.Connect()

View File

@ -2,7 +2,6 @@ package cli
import (
"context"
"encoding/json"
"strings"
"testing"
"time"
@ -21,11 +20,6 @@ import (
"github.com/stretchr/testify/require"
)
// swarmPeersOutput is used to parse the JSON output of 'ipfs swarm peers --enc=json'
type swarmPeersOutput struct {
Peers []struct{} `json:"Peers"`
}
func TestRoutingV1Server(t *testing.T) {
t.Parallel()
@ -38,6 +32,7 @@ func TestRoutingV1Server(t *testing.T) {
})
})
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
return nodes
}
@ -139,6 +134,7 @@ func TestRoutingV1Server(t *testing.T) {
cfg.Routing.Type = config.NewOptionalString("dht")
})
node.StartDaemon()
defer node.StopDaemon()
// Put IPNS record in lonely node. It should be accepted as it is a valid record.
c, err = client.New(node.GatewayURL())
@ -202,15 +198,19 @@ func TestRoutingV1Server(t *testing.T) {
}
})
node.StartDaemon()
defer node.StopDaemon()
c, err := client.New(node.GatewayURL())
require.NoError(t, err)
// Try to get closest peers - should fail gracefully with an error
// Try to get closest peers - should fail gracefully with an error.
// Use 60-second timeout (server has 30s routing timeout).
testCid, err := cid.Decode("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
require.NoError(t, err)
_, err = c.GetClosestPeers(context.Background(), testCid)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
_, err = c.GetClosestPeers(ctx, testCid)
require.Error(t, err)
// All these routing types should indicate DHT is not available
// The exact error message may vary based on implementation details
@ -224,7 +224,7 @@ func TestRoutingV1Server(t *testing.T) {
}
})
t.Run("GetClosestPeers returns peers for self", func(t *testing.T) {
t.Run("GetClosestPeers returns peers", func(t *testing.T) {
t.Parallel()
routingTypes := []string{"auto", "autoclient", "dht", "dhtclient"}
@ -241,19 +241,7 @@ func TestRoutingV1Server(t *testing.T) {
cfg.Bootstrap = autoconf.FallbackBootstrapPeers
})
node.StartDaemon()
// Wait for node to connect to bootstrap peers and populate WAN DHT routing table
minPeers := len(autoconf.FallbackBootstrapPeers)
require.EventuallyWithT(t, func(t *assert.CollectT) {
res := node.RunIPFS("swarm", "peers", "--enc=json")
var output swarmPeersOutput
err := json.Unmarshal(res.Stdout.Bytes(), &output)
assert.NoError(t, err)
peerCount := len(output.Peers)
// Wait until we have at least minPeers connected
assert.GreaterOrEqual(t, peerCount, minPeers,
"waiting for at least %d bootstrap peers, currently have %d", minPeers, peerCount)
}, 30*time.Second, time.Second)
defer node.StopDaemon()
c, err := client.New(node.GatewayURL())
require.NoError(t, err)
@ -261,16 +249,27 @@ func TestRoutingV1Server(t *testing.T) {
// Query for closest peers to our own peer ID
key := peer.ToCid(node.PeerID())
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
resultsIter, err := c.GetClosestPeers(ctx, key)
require.NoError(t, err)
records, err := iter.ReadAllResults(resultsIter)
require.NoError(t, err)
// Wait for WAN DHT routing table to be populated.
// The server has a 30-second routing timeout, so we use 60 seconds
// per request to allow for network latency while preventing hangs.
// Total wait time is 2 minutes (locally passes in under 1 minute).
var records []*types.PeerRecord
require.EventuallyWithT(t, func(ct *assert.CollectT) {
ctx, cancel := context.WithTimeout(t.Context(), 60*time.Second)
defer cancel()
resultsIter, err := c.GetClosestPeers(ctx, key)
if !assert.NoError(ct, err) {
return
}
records, err = iter.ReadAllResults(resultsIter)
assert.NoError(ct, err)
}, 2*time.Minute, 5*time.Second)
// Verify we got some peers back from WAN DHT
assert.NotEmpty(t, records, "should return some peers close to own peerid")
require.NotEmpty(t, records, "should return peers close to own peerid")
// Per IPIP-0476, GetClosestPeers returns at most 20 peers
assert.LessOrEqual(t, len(records), 20, "IPIP-0476 limits GetClosestPeers to 20 peers")
// Verify structure of returned records
for _, record := range records {

View File

@ -16,6 +16,7 @@ func TestDHTAutoclient(t *testing.T) {
node.IPFS("config", "Routing.Type", "autoclient")
})
nodes.StartDaemons().Connect()
t.Cleanup(func() { nodes.StopDaemons() })
t.Run("file added on node in client mode is retrievable from node in client mode", func(t *testing.T) {
t.Parallel()

View File

@ -22,6 +22,7 @@ func TestDHTOptimisticProvide(t *testing.T) {
})
nodes.StartDaemons().Connect()
defer nodes.StopDaemons()
hash := nodes[0].IPFSAddStr(string(random.Bytes(100)))
nodes[0].IPFS("routing", "provide", hash)

View File

@ -0,0 +1,147 @@
package cli
import (
"encoding/json"
"testing"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDiagDatastore(t *testing.T) {
t.Parallel()
t.Run("diag datastore get returns error for non-existent key", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Don't start daemon - these commands require daemon to be stopped
res := node.RunIPFS("diag", "datastore", "get", "/nonexistent/key")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "key not found")
})
t.Run("diag datastore get returns raw bytes by default", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Add some data to create a known datastore key
// We need daemon for add, then stop it
node.StartDaemon()
cid := node.IPFSAddStr("test data for diag datastore")
node.IPFS("pin", "add", cid)
node.StopDaemon()
// Test count to verify we have entries
count := node.DatastoreCount("/")
t.Logf("total datastore entries: %d", count)
assert.NotEqual(t, int64(0), count, "should have datastore entries after pinning")
})
t.Run("diag datastore get --hex returns hex dump", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Add and pin some data
node.StartDaemon()
cid := node.IPFSAddStr("test data for hex dump")
node.IPFS("pin", "add", cid)
node.StopDaemon()
// Test with existing keys in pins namespace
count := node.DatastoreCount("/pins/")
t.Logf("pins datastore entries: %d", count)
if count != 0 {
t.Log("pins datastore has entries, hex dump format tested implicitly")
}
})
t.Run("diag datastore count returns 0 for empty prefix", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
count := node.DatastoreCount("/definitely/nonexistent/prefix/")
assert.Equal(t, int64(0), count)
})
t.Run("diag datastore count returns JSON with --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
res := node.IPFS("diag", "datastore", "count", "/pubsub/seqno/", "--enc=json")
assert.NoError(t, res.Err)
var result struct {
Prefix string `json:"prefix"`
Count int64 `json:"count"`
}
err := json.Unmarshal(res.Stdout.Bytes(), &result)
require.NoError(t, err)
assert.Equal(t, "/pubsub/seqno/", result.Prefix)
assert.Equal(t, int64(0), result.Count)
})
t.Run("diag datastore get returns JSON with --enc=json", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Test error case with JSON encoding
res := node.RunIPFS("diag", "datastore", "get", "/nonexistent", "--enc=json")
assert.Error(t, res.Err)
})
t.Run("diag datastore count counts entries correctly", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Add multiple pins to create multiple entries
node.StartDaemon()
cid1 := node.IPFSAddStr("data 1")
cid2 := node.IPFSAddStr("data 2")
cid3 := node.IPFSAddStr("data 3")
node.IPFS("pin", "add", cid1)
node.IPFS("pin", "add", cid2)
node.IPFS("pin", "add", cid3)
node.StopDaemon()
// Count should reflect the pins (plus any system entries)
count := node.DatastoreCount("/")
t.Logf("total entries after adding 3 pins: %d", count)
// Should have more than 0 entries
assert.NotEqual(t, int64(0), count)
})
t.Run("diag datastore commands work offline", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init()
// Don't start daemon - these commands require daemon to be stopped
// Count should work offline
count := node.DatastoreCount("/pubsub/seqno/")
assert.Equal(t, int64(0), count)
// Get should return error for missing key (but command should work)
res := node.RunIPFS("diag", "datastore", "get", "/nonexistent/key")
assert.Error(t, res.Err)
assert.Contains(t, res.Stderr.String(), "key not found")
})
t.Run("diag datastore commands require daemon to be stopped", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
defer node.StopDaemon()
// Both get and count require repo lock, which is held by the running daemon
res := node.RunIPFS("diag", "datastore", "get", "/test")
assert.Error(t, res.Err, "get should fail when daemon is running")
assert.Contains(t, res.Stderr.String(), "ipfs daemon is running")
res = node.RunIPFS("diag", "datastore", "count", "/pubsub/seqno/")
assert.Error(t, res.Err, "count should fail when daemon is running")
assert.Contains(t, res.Stderr.String(), "ipfs daemon is running")
})
}

View File

@ -0,0 +1,143 @@
package cli
import (
"strings"
"testing"
"time"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testDomainSuffix is the default p2p-forge domain used in tests
const testDomainSuffix = config.DefaultDomainSuffix // libp2p.direct
// TestDNSResolversApplyToMultiaddr is a regression test for:
// https://github.com/ipfs/kubo/issues/9199
//
// It verifies that DNS.Resolvers config is used when resolving /dnsaddr,
// /dns, /dns4, /dns6 multiaddrs during peer connections, not just for
// DNSLink resolution.
func TestDNSResolversApplyToMultiaddr(t *testing.T) {
t.Parallel()
t.Run("invalid DoH resolver causes multiaddr resolution to fail", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=test")
// Set an invalid DoH resolver that will fail when used.
// If DNS.Resolvers is properly wired to multiaddr resolution,
// swarm connect to a /dnsaddr will fail with an error mentioning
// the invalid resolver URL.
invalidResolver := "https://invalid.broken.resolver.test/dns-query"
node.SetIPFSConfig("DNS.Resolvers", map[string]string{
".": invalidResolver,
})
// Clear bootstrap peers to prevent background connection attempts
node.SetIPFSConfig("Bootstrap", []string{})
node.StartDaemon()
defer node.StopDaemon()
// Give daemon time to fully start
time.Sleep(2 * time.Second)
// Verify daemon is responsive
result := node.RunIPFS("id")
require.Equal(t, 0, result.ExitCode(), "daemon should be responsive")
// Try to connect to a /dnsaddr peer - this should fail because
// the DNS.Resolvers config points to an invalid DoH server
result = node.RunIPFS("swarm", "connect", "/dnsaddr/bootstrap.libp2p.io")
// The connection should fail
require.NotEqual(t, 0, result.ExitCode(),
"swarm connect should fail when DNS.Resolvers points to invalid DoH server")
// The error should mention the invalid resolver, proving DNS.Resolvers
// is being used for multiaddr resolution
stderr := result.Stderr.String()
assert.True(t,
strings.Contains(stderr, "invalid.broken.resolver.test") ||
strings.Contains(stderr, "no such host") ||
strings.Contains(stderr, "lookup") ||
strings.Contains(stderr, "dial"),
"error should indicate DNS resolution failure using custom resolver. got: %s", stderr)
})
t.Run("libp2p.direct resolves locally even with broken DNS.Resolvers", func(t *testing.T) {
t.Parallel()
h := harness.NewT(t)
nodes := h.NewNodes(2).Init("--profile=test")
// Configure node0 with a broken DNS resolver
// This would break all DNS resolution if libp2p.direct wasn't resolved locally
invalidResolver := "https://invalid.broken.resolver.test/dns-query"
nodes[0].SetIPFSConfig("DNS.Resolvers", map[string]string{
".": invalidResolver,
})
// Clear bootstrap peers on both nodes
for _, n := range nodes {
n.SetIPFSConfig("Bootstrap", []string{})
}
nodes.StartDaemons()
defer nodes.StopDaemons()
// Get node1's peer ID in base36 format (what p2p-forge uses in DNS hostnames)
// DNS is case-insensitive, and base36 is lowercase-only, making it ideal for DNS
idResult := nodes[1].RunIPFS("id", "--peerid-base", "base36", "-f", "<id>")
require.Equal(t, 0, idResult.ExitCode())
node1IDBase36 := strings.TrimSpace(idResult.Stdout.String())
node1ID := nodes[1].PeerID().String()
node1Addrs := nodes[1].SwarmAddrs()
// Find a TCP address we can use
var tcpAddr string
for _, addr := range node1Addrs {
addrStr := addr.String()
if strings.Contains(addrStr, "/tcp/") && strings.Contains(addrStr, "/ip4/127.0.0.1") {
tcpAddr = addrStr
break
}
}
require.NotEmpty(t, tcpAddr, "node1 should have a local TCP address")
// Extract port from address like /ip4/127.0.0.1/tcp/12345/...
parts := strings.Split(tcpAddr, "/")
var port string
for i, p := range parts {
if p == "tcp" && i+1 < len(parts) {
port = parts[i+1]
break
}
}
require.NotEmpty(t, port, "should find TCP port in address")
// Construct a libp2p.direct hostname that encodes 127.0.0.1
// Format: /dns4/<ip-encoded>.<peerID-base36>.libp2p.direct/tcp/<port>/p2p/<peerID>
// p2p-forge uses base36 peerIDs in DNS hostnames (lowercase, DNS-safe)
libp2pDirectAddr := "/dns4/127-0-0-1." + node1IDBase36 + "." + testDomainSuffix + "/tcp/" + port + "/p2p/" + node1ID
// This connection should succeed because libp2p.direct is resolved locally
// even though DNS.Resolvers points to a broken server
result := nodes[0].RunIPFS("swarm", "connect", libp2pDirectAddr)
// The connection should succeed - local resolution bypasses broken DNS
assert.Equal(t, 0, result.ExitCode(),
"swarm connect to libp2p.direct should succeed with local resolution. stderr: %s",
result.Stderr.String())
// Verify the connection was actually established
result = nodes[0].RunIPFS("swarm", "peers")
require.Equal(t, 0, result.ExitCode())
assert.Contains(t, result.Stdout.String(), node1ID,
"node0 should be connected to node1")
})
}

Some files were not shown because too many files have changed in this diff.