Compare commits


No commits in common. "master" and "v0.39.0-rc1" have entirely different histories.

283 changed files with 3996 additions and 13681 deletions


@@ -1,45 +1,6 @@
# Dependabot PRs are auto-tidied by .github/workflows/dependabot-tidy.yml
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "monthly"
open-pull-requests-limit: 10
labels:
- "dependencies"
ignore:
# Updated via go-ds-* wrappers in ipfs-ecosystem group
- dependency-name: "github.com/cockroachdb/pebble*"
- dependency-name: "github.com/syndtr/goleveldb"
- dependency-name: "github.com/dgraph-io/badger*"
groups:
ipfs-ecosystem:
patterns:
- "github.com/ipfs/*"
- "github.com/ipfs-shipyard/*"
- "github.com/ipshipyard/*"
- "github.com/multiformats/*"
- "github.com/ipld/*"
libp2p-ecosystem:
patterns:
- "github.com/libp2p/*"
golang-x:
patterns:
- "golang.org/x/*"
opentelemetry:
patterns:
- "go.opentelemetry.io/*"
prometheus:
patterns:
- "github.com/prometheus/*"
- "contrib.go.opencensus.io/*"
- "go.opencensus.io"
uber:
patterns:
- "go.uber.org/*"

.github/legacy/Dockerfile.goipfs-stub

@@ -0,0 +1,26 @@
# syntax=docker/dockerfile:1
# Stub Dockerfile for the deprecated 'ipfs/go-ipfs' image name.
# This image redirects users to the new 'ipfs/kubo' name.
FROM busybox:stable-glibc
# Copy stub entrypoint that displays deprecation message
COPY .github/legacy/goipfs_stub.sh /usr/local/bin/ipfs
# Make it executable
RUN chmod +x /usr/local/bin/ipfs
# Use the same ports as the real image for compatibility
EXPOSE 4001 4001/udp 5001 8080 8081
# Create ipfs user for consistency
ENV IPFS_PATH=/data/ipfs
RUN mkdir -p $IPFS_PATH \
&& adduser -D -h $IPFS_PATH -u 1000 -G users ipfs \
&& chown ipfs:users $IPFS_PATH
# Run as ipfs user
USER ipfs
# The stub script will run and exit with an error message
ENTRYPOINT ["/usr/local/bin/ipfs"]
CMD ["daemon"]

.github/legacy/goipfs_stub.sh

@@ -0,0 +1,20 @@
#!/bin/sh
# Stub script for the deprecated 'ipfs/go-ipfs' Docker image.
# This informs users to switch to 'ipfs/kubo'.
cat >&2 <<'EOF'
ERROR: The name 'go-ipfs' is no longer used.
Please update your Docker scripts to use 'ipfs/kubo' instead of 'ipfs/go-ipfs'.
For example:
docker pull ipfs/kubo:release
More information:
- https://github.com/ipfs/kubo#docker
- https://hub.docker.com/r/ipfs/kubo
- https://docs.ipfs.tech/install/run-ipfs-inside-docker/
EOF
exit 1


@@ -29,7 +29,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Setup Go
uses: actions/setup-go@v6
@@ -38,12 +38,12 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
uses: github/codeql-action/init@v3
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v4
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4
uses: github/codeql-action/analyze@v3


@@ -1,61 +0,0 @@
# Dependabot only updates go.mod/go.sum in the root module, but this repo has
# multiple Go modules (see docs/examples/). This workflow runs `make mod_tidy`
# on Dependabot PRs to keep all go.sum files in sync, preventing go-check CI
# failures.
name: Dependabot Tidy
on:
pull_request_target:
types: [opened, synchronize]
workflow_dispatch:
inputs:
pr_number:
description: 'PR number to run mod_tidy on'
required: true
type: number
permissions:
contents: write
pull-requests: write
jobs:
tidy:
if: github.actor == 'dependabot[bot]' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
steps:
- name: Get PR info
id: pr
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
pr_number="${{ inputs.pr_number }}"
else
pr_number="${{ github.event.pull_request.number }}"
fi
echo "number=$pr_number" >> $GITHUB_OUTPUT
branch=$(gh pr view "$pr_number" --repo "${{ github.repository }}" --json headRefName -q '.headRefName')
echo "branch=$branch" >> $GITHUB_OUTPUT
- uses: actions/checkout@v6
with:
ref: ${{ steps.pr.outputs.branch }}
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
- name: Run make mod_tidy
run: make mod_tidy
- name: Check for changes
id: git-check
run: |
if [[ -n $(git status --porcelain) ]]; then
echo "modified=true" >> $GITHUB_OUTPUT
fi
- name: Commit changes
if: steps.git-check.outputs.modified == 'true'
run: |
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git add -A
git commit -m "chore: run make mod_tidy"
git push


@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: hadolint/hadolint-action@v3.3.0
with:
dockerfile: Dockerfile
@@ -41,7 +41,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3


@@ -43,7 +43,7 @@ jobs:
tags: ${{ steps.tags.outputs.value }}
steps:
- name: Check out the repo
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -141,3 +141,52 @@
cache-to: |
type=gha,mode=max
type=registry,ref=${{ env.IMAGE_NAME }}:buildcache,mode=max
# Build and push stub image to the legacy ipfs/go-ipfs name
# This redirects users to use ipfs/kubo instead
legacy-name:
needs: docker-hub
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
name: Push stub to legacy ipfs/go-ipfs name
runs-on: ubuntu-latest
timeout-minutes: 5
env:
LEGACY_IMAGE_NAME: ipfs/go-ipfs
steps:
- name: Check out the repo
uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ vars.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert tags to legacy image name
id: legacy_tags
run: |
TAGS="${{ github.event.inputs.tags || needs.docker-hub.outputs.tags }}"
if ! echo "$TAGS" | grep -q "kubo"; then
echo "ERROR: Tags must contain kubo image name"
exit 1
fi
echo "value<<EOF" >> $GITHUB_OUTPUT
echo "$TAGS" | sed "s|ipfs/kubo|$LEGACY_IMAGE_NAME|g" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
shell: bash
- if: github.event_name != 'workflow_dispatch' || github.event.inputs.push == 'true'
name: Build and push legacy stub image
uses: docker/build-push-action@v6
with:
platforms: linux/amd64,linux/arm/v7,linux/arm64/v8
context: .
push: true
file: ./.github/legacy/Dockerfile.goipfs-stub
tags: ${{ steps.legacy_tags.outputs.value }}
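
The `sed` step above is a plain global substitution on the comma-separated tag list. As an illustration only, a rough Go equivalent of that rewrite (the tag values here are made up; `ipfs/go-ipfs` is the workflow's `LEGACY_IMAGE_NAME`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative input: tags as emitted by the docker-hub job.
	tags := "ipfs/kubo:latest,ipfs/kubo:release"
	// Mirrors the workflow's guard: tags must reference the kubo image.
	if !strings.Contains(tags, "kubo") {
		panic("ERROR: Tags must contain kubo image name")
	}
	// Same rewrite as: sed "s|ipfs/kubo|ipfs/go-ipfs|g"
	fmt.Println(strings.ReplaceAll(tags, "ipfs/kubo", "ipfs/go-ipfs"))
	// Output: ipfs/go-ipfs:latest,ipfs/go-ipfs:release
}
```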


@@ -41,13 +41,13 @@ jobs:
steps:
# 1. Download the gateway-conformance fixtures
- name: Download gateway-conformance fixtures
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.10
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8
with:
output: fixtures
# 2. Build the kubo-gateway
- name: Checkout kubo-gateway
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
path: kubo-gateway
- name: Setup Go
@@ -93,7 +93,7 @@ jobs:
# 6. Run the gateway-conformance tests
- name: Run gateway-conformance tests
uses: ipfs/gateway-conformance/.github/actions/test@v0.10
uses: ipfs/gateway-conformance/.github/actions/test@v0.8
with:
gateway-url: http://127.0.0.1:8080
subdomain-url: http://localhost:8080
@@ -109,13 +109,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: gateway-conformance.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: gateway-conformance.json
path: output.json
@@ -127,13 +127,13 @@ jobs:
steps:
# 1. Download the gateway-conformance fixtures
- name: Download gateway-conformance fixtures
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.10
uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8
with:
output: fixtures
# 2. Build the kubo-gateway
- name: Checkout kubo-gateway
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
path: kubo-gateway
- name: Setup Go
@@ -199,7 +199,7 @@ jobs:
# 9. Run the gateway-conformance tests over libp2p
- name: Run gateway-conformance tests over libp2p
uses: ipfs/gateway-conformance/.github/actions/test@v0.10
uses: ipfs/gateway-conformance/.github/actions/test@v0.8
with:
gateway-url: http://127.0.0.1:8092
args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length'
@@ -214,13 +214,13 @@ jobs:
run: cat output.md >> $GITHUB_STEP_SUMMARY
- name: Upload HTML report
if: failure() || success()
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: gateway-conformance-libp2p.html
path: output.html
- name: Upload JSON report
if: failure() || success()
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: gateway-conformance-libp2p.json
path: output.json


@@ -27,7 +27,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'


@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
submodules: recursive
- uses: actions/setup-go@v6
@@ -47,15 +47,6 @@ jobs:
echo "$out"
exit 1
fi
- name: go fix
if: always() # run this step even if the previous one failed
run: |
go fix ./...
if [[ -n $(git diff --name-only) ]]; then
echo "go fix produced changes. Run 'go fix ./...' locally and commit the result."
git diff
exit 1
fi
- name: go vet
if: always() # run this step even if the previous one failed
uses: protocol/multiple-go-modules@v1.4


@@ -28,7 +28,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'


@@ -14,13 +14,11 @@ concurrency:
cancel-in-progress: true
jobs:
# Unit tests with coverage collection (uploaded to Codecov)
unit-tests:
go-test:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 15
timeout-minutes: 20
env:
GOTRACEBACK: single # reduce noise on test timeout panics
TEST_DOCKER: 0
TEST_FUSE: 0
TEST_VERBOSE: 1
@@ -31,25 +29,48 @@
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: Run unit tests
- name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
env:
# increasing parallelism beyond 2 doesn't speed up the tests much
PARALLEL: 2
run: |
make test_unit &&
make -j "$PARALLEL" test/unit/gotest.junit.xml &&
[[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
- name: Upload coverage to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
if: failure() || success()
with:
name: unittests
files: coverage/unit_tests.coverprofile
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Test kubo-as-a-library example
run: |
# we want to first test with the kubo version in the go.mod file
go test -v ./...
# we also want to test the examples against the current version of kubo
# however, that version might be in a fork so we need to replace the dependency
# backup the go.mod and go.sum files to restore them after we run the tests
cp go.mod go.mod.bak
cp go.sum go.sum.bak
# make sure the examples run against the current version of kubo
go mod edit -replace github.com/ipfs/kubo=./../../..
go mod tidy
go test -v ./...
# restore the go.mod and go.sum files to their original state
mv go.mod.bak go.mod
mv go.sum.bak go.sum
working-directory: docs/examples/kubo-as-a-library
- name: Create a proper JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
@@ -57,9 +78,9 @@ jobs:
output: test/unit/gotest.junit.xml
if: failure() || success()
- name: Archive the JUnit XML report
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: unit-tests-junit
name: unit
path: test/unit/gotest.junit.xml
if: failure() || success()
- name: Create a HTML report
@@ -70,9 +91,9 @@ jobs:
output: test/unit/gotest.html
if: failure() || success()
- name: Archive the HTML report
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: unit-tests-html
name: html
path: test/unit/gotest.html
if: failure() || success()
- name: Create a Markdown report
@@ -85,86 +106,3 @@
- name: Set the summary
run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()
# End-to-end integration/regression tests from test/cli
# (Go-based replacement for legacy test/sharness shell scripts)
cli-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 15
env:
GOTRACEBACK: single # reduce noise on test timeout panics
TEST_VERBOSE: 1
GIT_PAGER: cat
IPFS_CHECK_RCMGR_DEFAULTS: 1
defaults:
run:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y zsh
- name: Run CLI tests
env:
IPFS_PATH: ${{ runner.temp }}/ipfs-test
run: make test_cli
- name: Create JUnit XML report
uses: ipdxco/gotest-json-to-junit-xml@v1
with:
input: test/cli/cli-tests.json
output: test/cli/cli-tests.junit.xml
if: failure() || success()
- name: Archive JUnit XML report
uses: actions/upload-artifact@v6
with:
name: cli-tests-junit
path: test/cli/cli-tests.junit.xml
if: failure() || success()
- name: Create HTML report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: no-frames
input: test/cli/cli-tests.junit.xml
output: test/cli/cli-tests.html
if: failure() || success()
- name: Archive HTML report
uses: actions/upload-artifact@v6
with:
name: cli-tests-html
path: test/cli/cli-tests.html
if: failure() || success()
- name: Create Markdown report
uses: ipdxco/junit-xml-to-html@v1
with:
mode: summary
input: test/cli/cli-tests.junit.xml
output: test/cli/cli-tests.md
if: failure() || success()
- name: Set summary
run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY
if: failure() || success()
# Example tests (kubo-as-a-library)
example-tests:
if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
timeout-minutes: 5
env:
GOTRACEBACK: single
defaults:
run:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Run example tests
run: make test_examples


@@ -1,17 +1,3 @@
# Interoperability Tests
#
# This workflow ensures Kubo remains compatible with the broader IPFS ecosystem.
# It builds Kubo from source, then runs:
#
# 1. helia-interop: Tests compatibility with Helia (JavaScript IPFS implementation)
# using Playwright-based tests from @helia/interop package.
#
# 2. ipfs-webui: Runs E2E tests from ipfs/ipfs-webui repository to verify
# the web interface works correctly with the locally built Kubo binary.
#
# Both jobs use caching to speed up repeated runs (npm dependencies, Playwright
# browsers, and webui build artifacts).
name: Interop
on:
@@ -46,12 +32,12 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- run: make build
- uses: actions/upload-artifact@v6
- uses: actions/upload-artifact@v5
with:
name: kubo
path: cmd/ipfs/ipfs
@@ -66,47 +52,23 @@ jobs:
- uses: actions/setup-node@v6
with:
node-version: lts/*
- uses: actions/download-artifact@v7
- uses: actions/download-artifact@v6
with:
name: kubo
path: cmd/ipfs
- run: chmod +x cmd/ipfs/ipfs
- run: echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
id: npm-cache-dir
- uses: actions/cache@v4
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }}
restore-keys: ${{ runner.os }}-${{ github.job }}-helia-
- run: sudo apt update
- run: sudo apt install -y libxkbcommon0 libxdamage1 libgbm1 libpango-1.0-0 libcairo2 # dependencies for playwright
# Cache node_modules based on latest @helia/interop version from npm registry.
# This ensures we always test against the latest release while still benefiting
# from caching when the version hasn't changed.
- name: Get latest @helia/interop version
id: helia-version
run: echo "version=$(npm view @helia/interop version)" >> $GITHUB_OUTPUT
- name: Cache helia-interop node_modules
uses: actions/cache@v5
id: helia-cache
with:
path: node_modules
key: ${{ runner.os }}-helia-interop-${{ steps.helia-version.outputs.version }}
- name: Install @helia/interop
if: steps.helia-cache.outputs.cache-hit != 'true'
run: npm install @helia/interop
# TODO(IPIP-499): Remove --grep --invert workaround once helia implements IPIP-499
# Tracking issue: https://github.com/ipfs/helia/issues/941
#
# PROVISIONAL HACK: Skip '@helia/mfs - should have the same CID after
# creating a file' test due to IPIP-499 changes in kubo.
#
# WHY IT FAILS: The test creates a 5-byte file in MFS on both kubo and helia,
# then compares the root directory CID. With kubo PR #11148, `ipfs files write`
# now produces raw CIDs for single-block files (matching `ipfs add --raw-leaves`),
# while helia uses `reduceSingleLeafToSelf: false` which keeps the dag-pb wrapper.
# Different file CIDs lead to different directory CIDs.
#
# We run aegir directly (instead of helia-interop binary) because only aegir
# supports the --grep/--invert flags needed to exclude specific tests.
- name: Run helia-interop tests (excluding IPIP-499 incompatible test)
run: npx aegir test -t node --bail -- --grep 'should have the same CID after creating a file' --invert
- run: npx --package @helia/interop helia-interop
env:
KUBO_BINARY: ${{ github.workspace }}/cmd/ipfs/ipfs
working-directory: node_modules/@helia/interop
ipfs-webui:
needs: [interop-prep]
runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
@@ -122,82 +84,48 @@
run:
shell: bash
steps:
- uses: actions/download-artifact@v7
- uses: actions/setup-node@v6
with:
node-version: 20.x
- uses: actions/download-artifact@v6
with:
name: kubo
path: cmd/ipfs
- run: chmod +x cmd/ipfs/ipfs
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
repository: ipfs/ipfs-webui
path: ipfs-webui
- uses: actions/setup-node@v6
- run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
id: npm-cache-dir
- uses: actions/cache@v4
with:
node-version-file: 'ipfs-webui/.tool-versions'
- id: webui-ref
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-${{ github.job }}-
- env:
NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }}
run: |
npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR"
npx playwright install --with-deps
working-directory: ipfs-webui
- id: ref
run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT
working-directory: ipfs-webui
- id: webui-state
- id: state
env:
GITHUB_TOKEN: ${{ github.token }}
ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.webui-ref.outputs.ref }}/status
ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.ref.outputs.ref }}/status
SELECTOR: .state
KEY: state
run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "$KEY={}" | tee -a $GITHUB_OUTPUT
# Cache node_modules based on package-lock.json
- name: Cache node_modules
uses: actions/cache@v5
id: node-modules-cache
with:
path: ipfs-webui/node_modules
key: ${{ runner.os }}-webui-node-modules-${{ hashFiles('ipfs-webui/package-lock.json') }}
restore-keys: |
${{ runner.os }}-webui-node-modules-
- name: Install dependencies
if: steps.node-modules-cache.outputs.cache-hit != 'true'
run: npm ci --prefer-offline --no-audit --progress=false
working-directory: ipfs-webui
# Cache Playwright browsers
- name: Cache Playwright browsers
uses: actions/cache@v5
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: ${{ runner.os }}-playwright-${{ hashFiles('ipfs-webui/package-lock.json') }}
restore-keys: |
${{ runner.os }}-playwright-
# On cache miss: download browsers and install OS dependencies
- name: Install Playwright with dependencies
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: npx playwright install --with-deps
working-directory: ipfs-webui
# On cache hit: only ensure OS dependencies are present (fast, idempotent)
- name: Install Playwright OS dependencies
if: steps.playwright-cache.outputs.cache-hit == 'true'
run: npx playwright install-deps
working-directory: ipfs-webui
# Cache test build output
- name: Cache test build
uses: actions/cache@v5
id: test-build-cache
with:
path: ipfs-webui/build
key: ${{ runner.os }}-webui-build-${{ hashFiles('ipfs-webui/package-lock.json', 'ipfs-webui/src/**', 'ipfs-webui/public/**') }}
restore-keys: |
${{ runner.os }}-webui-build-
- name: Build ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }})
if: steps.test-build-cache.outputs.cache-hit != 'true'
- name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }})
run: npm run test:build
working-directory: ipfs-webui
- name: Test ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }}) E2E against the locally built Kubo binary
- name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary
run: npm run test:e2e
env:
IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs
working-directory: ipfs-webui
- name: Upload test artifacts on failure
if: failure()
uses: actions/upload-artifact@v6
with:
name: webui-test-results
path: ipfs-webui/test-results/
retention-days: 7

View File

@@ -23,7 +23,7 @@ jobs:
shell: bash
steps:
- name: Checkout Kubo
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
path: kubo
- name: Setup Go
@@ -32,7 +32,7 @@ jobs:
go-version-file: 'kubo/go.mod'
- name: Install missing tools
run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: test/sharness/lib/dependencies
key: ${{ runner.os }}-test-generate-junit-html-${{ hashFiles('test/sharness/lib/test-generate-junit-html.sh') }}
@@ -55,13 +55,11 @@
# increasing parallelism beyond 10 doesn't speed up the tests much
PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
- name: Upload coverage report
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
if: failure() || success()
with:
name: sharness
files: kubo/coverage/sharness_tests.coverprofile
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Aggregate results
run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt
- name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column
@@ -90,7 +88,7 @@ jobs:
destination: sharness.html
- name: Upload one-page HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: sharness.html
path: kubo/test/sharness/test-results/sharness.html
@@ -110,7 +108,7 @@ jobs:
destination: sharness-html/
- name: Upload full HTML report
if: github.repository != 'ipfs/kubo' && (failure() || success())
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: sharness-html
path: kubo/test/sharness/test-results/sharness-html


@@ -41,7 +41,7 @@ jobs:
shell: bash
steps:
- name: Check out Kubo
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Set up Go
uses: actions/setup-go@v6
@@ -77,7 +77,7 @@
- name: Upload test results
if: always()
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: ${{ matrix.os }}-test-results
path: |

.gitignore

@@ -28,11 +28,6 @@ go-ipfs-source.tar.gz
docs/examples/go-ipfs-as-a-library/example-folder/Qm*
/test/sharness/t0054-dag-car-import-export-data/*.car
# test artifacts from make test_unit / test_cli
/test/unit/gotest.json
/test/unit/gotest.junit.xml
/test/cli/cli-tests.json
# ignore build output from snapcraft
/ipfs_*.snap
/parts

AGENTS.md

@@ -1,218 +0,0 @@
# AI Agent Instructions for Kubo
This file provides instructions for AI coding agents working on the [Kubo](https://github.com/ipfs/kubo) codebase (the Go implementation of IPFS). Follow the [Developer Guide](docs/developer-guide.md) for full details.
## Quick Reference
| Task | Command |
|-------------------|----------------------------------------------------------|
| Tidy deps | `make mod_tidy` (run first if `go.mod` changed) |
| Build | `make build` |
| Unit tests | `go test ./... -run TestName -v` |
| Integration tests | `make build && go test ./test/cli/... -run TestName -v` |
| Lint | `make -O test_go_lint` |
| Format | `go fmt ./...` |
## Project Overview
Kubo is the reference implementation of IPFS in Go. Most IPFS protocol logic lives in [boxo](https://github.com/ipfs/boxo) (the IPFS SDK); kubo wires it together and exposes it via CLI and HTTP RPC API. If a change belongs in the protocol layer, it likely belongs in boxo, not here.
Key directories:
| Directory | Purpose |
|--------------------|----------------------------------------------------------|
| `cmd/ipfs/` | CLI entry point and binary |
| `core/` | core IPFS node implementation |
| `core/commands/` | CLI command definitions |
| `core/coreapi/` | Go API implementation |
| `client/rpc/` | HTTP RPC client |
| `plugin/` | plugin system |
| `repo/` | repository management |
| `test/cli/` | Go-based CLI integration tests (preferred for new tests) |
| `test/sharness/` | legacy shell-based integration tests |
| `docs/` | documentation |
Other key external dependencies: [go-libp2p](https://github.com/libp2p/go-libp2p) (networking), [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) (DHT).
## Go Style
Follow these Go style references:
- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
- [Google Go Style Decisions](https://google.github.io/styleguide/go/decisions)
Specific conventions for this project:
- check the Go version in `go.mod` and use idiomatic features available at that version
- readability over micro-optimization: clear code is more important than saving microseconds
- prefer standard library functions and utilities over writing your own
- use early returns and indent the error flow, not the happy path
- use `slices.Contains`, `slices.DeleteFunc`, and the `maps` package instead of manual loops
- preallocate slices and maps when the size is known: `make([]T, 0, n)`
- use `map[K]struct{}` for sets, not `map[K]bool`
- receiver names: single-letter abbreviations matching the type (e.g., `s *Server`, `c *Client`)
- run `go fmt` after modifying Go source files, never indent manually
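
As a minimal sketch of a few of these conventions (the `dedupe` helper is hypothetical, used only for illustration):

```go
package main

import (
	"fmt"
	"slices"
)

// dedupe illustrates the conventions above: preallocate when the size is
// known, use map[K]struct{} for sets, and keep the happy path unindented.
func dedupe(items []string) []string {
	seen := make(map[string]struct{}, len(items)) // set, not map[string]bool
	out := make([]string, 0, len(items))          // preallocated slice
	for _, it := range items {
		if _, ok := seen[it]; ok {
			continue
		}
		seen[it] = struct{}{}
		out = append(out, it)
	}
	return out
}

func main() {
	got := dedupe([]string{"a", "b", "a"})
	fmt.Println(got, slices.Contains(got, "b")) // [a b] true
}
```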
### Error Handling
- wrap errors with `fmt.Errorf("context: %w", err)`, never discard errors silently
- use `errors.Is` / `errors.As` for error checking, not string comparison
- never use `panic` in library code; only in `main` or test helpers
- return `nil` explicitly for the error value on success paths
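
A short sketch of this error-handling pattern (`loadConfig` and the path are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// loadConfig wraps failures with context via %w so callers can still
// match the underlying error with errors.Is / errors.As.
func loadConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("loading config %q: %w", path, err)
	}
	return data, nil // return nil explicitly on success
}

func main() {
	if _, err := loadConfig("/nonexistent/config.json"); err != nil {
		// errors.Is matches through the %w wrapping, no string comparison.
		fmt.Println("not found:", errors.Is(err, fs.ErrNotExist))
	}
}
```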
### Canonical Examples
When adding or modifying code, follow the patterns established in these files:
- CLI command structure: `core/commands/dag/dag.go`
- CLI integration test: `test/cli/dag_test.go`
- Test harness usage: `test/cli/harness/` package
## Building
Always run commands from the repository root.
```bash
make mod_tidy # update go.mod/go.sum (use this instead of go mod tidy)
make build # build the ipfs binary to cmd/ipfs/ipfs
make install # install to $GOPATH/bin
make -O test_go_lint # run linter (use this instead of golangci-lint directly)
```
If you modify `go.mod` (add/remove/update dependencies), you must run `make mod_tidy` first, before building or testing. Use `make mod_tidy` instead of `go mod tidy` directly, as the project has multiple `go.mod` files.
If you modify any `.go` files outside of `test/`, you must run `make build` before running integration tests.
## Testing
The full test suite is composed of several targets:
| Make target | What it runs |
|----------------------|-----------------------------------------------------------------------|
| `make test` | all tests (`test_go_fmt` + `test_unit` + `test_cli` + `test_sharness`) |
| `make test_short` | fast subset (`test_go_fmt` + `test_unit`) |
| `make test_unit` | unit tests with coverage (excludes `test/cli`) |
| `make test_cli` | CLI integration tests (requires `make build` first) |
| `make test_sharness` | legacy shell-based integration tests |
| `make test_go_fmt` | checks Go source formatting |
| `make -O test_go_lint` | runs `golangci-lint` |
During development, prefer running a specific test rather than the full suite:
```bash
# run a single unit test
go test ./core/... -run TestSpecificUnit -v
# run a single CLI integration test (requires make build first)
go test ./test/cli/... -run TestSpecificCLI -v
```
### Environment Setup for Integration Tests
Before running `test_cli` or `test_sharness`, set these environment variables from the repo root:
```bash
export PATH="$PWD/cmd/ipfs:$PATH"
export IPFS_PATH="$(mktemp -d)"
```
- `PATH`: integration tests use the `ipfs` binary from `PATH`, not Go source directly
- `IPFS_PATH`: isolates test data from `~/.ipfs` or other running nodes
If you see "version (N) is lower than repos (M)", the `ipfs` binary in `PATH` is outdated. Rebuild with `make build` and verify `PATH`.
### Running Sharness Tests
Sharness tests are legacy shell-based tests. Run individual tests with a timeout:
```bash
cd test/sharness && timeout 60s ./t0080-repo.sh
```
To investigate a failing test, pass `-v` for verbose output. In this mode, daemons spawned by the test are not shut down automatically and must be killed manually afterwards.
### Cleaning Up Stale Daemons
Before running `test/cli` or `test/sharness`, stop any stale `ipfs daemon` processes owned by the current user. Leftover daemons hold locks and bind ports, causing test failures:
```bash
pkill -f "ipfs daemon"
```
### Writing Tests
- all new integration tests go in `test/cli/`, not `test/sharness/`
- if a `test/sharness` test needs significant changes, remove it and add a replacement in `test/cli/`
- use [testify](https://github.com/stretchr/testify) for assertions (already a dependency)
- for Go 1.25+, use `testing/synctest` when testing concurrent code (goroutines, channels, timers)
- reuse existing `.car` fixtures in `test/cli/fixtures/` when possible; only add new fixtures when the test requires data not covered by existing ones
- always re-run modified tests locally before submitting to confirm they pass
- avoid emojis in test names and test log output
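
As an illustration of the testify assertion style (a self-contained, hypothetical example; real integration tests would use the `test/cli/harness` package instead of a standalone test):

```go
package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestSplitPath is a made-up example; require.* stops the test
// at the first failed assertion.
func TestSplitPath(t *testing.T) {
	got := strings.Split("/ipfs/<cid>/readme", "/")
	require.Len(t, got, 4)
	require.Equal(t, "ipfs", got[1])
}
```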
## Before Submitting
Run these steps in order before considering work complete:
1. `make mod_tidy` (if `go.mod` changed)
2. `go fmt ./...`
3. `make build` (if non-test `.go` files changed)
4. `make -O test_go_lint`
5. `go test ./...` (or the relevant subset)
## Documentation and Commit Messages
- after editing CLI help text in `core/commands/`, verify width: `go test ./test/cli/... -run TestCommandDocsWidth`
- config options are documented in `docs/config.md`
- changelogs in `docs/changelogs/`: only edit the Table of Contents and the Highlights section; the Changelog and Contributors sections are auto-generated and must not be modified
- follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- keep commit titles short and messages terse
## Writing Style
When writing docs, comments, and commit messages:
- avoid emojis in code, comments, and log output
- keep an empty line before lists in markdown
- use backticks around CLI commands, paths, environment variables, and config options
## PR Guidelines
- explain what changed and why in the PR description
- include test coverage for new functionality and bug fixes
- run `make -O test_go_lint` and fix any lint issues before submitting
- verify that `go test ./...` passes locally
- when modifying `test/sharness` tests significantly, migrate them to `test/cli` instead
- end the PR description with a `## References` section listing related context, one link per line
- if the PR closes an issue in `ipfs/kubo`, each closing reference should be a bullet starting with `Closes`:
```markdown
## References
- Closes https://github.com/ipfs/kubo/issues/1234
- Closes https://github.com/ipfs/kubo/issues/5678
- https://discuss.ipfs.tech/t/related-topic/999
```
## Scope and Safety
Do not modify or touch:
- files under `test/sharness/lib/` (third-party sharness test framework)
- CI workflows in `.github/` unless explicitly asked
- auto-generated sections in `docs/changelogs/` (Changelog and Contributors are generated; only TOC and Highlights are human-edited)
Do not run without being asked:
- `make test` or `make test_sharness` (full suite is slow; prefer targeted tests)
- `ipfs daemon` without a timeout
## Running the Daemon
Always run the daemon with a timeout or shut it down promptly:
```bash
timeout 60s ipfs daemon # auto-kill after 60s
ipfs shutdown # graceful shutdown via API
```
Kill dangling daemons before re-running tests: `pkill -f "ipfs daemon"`


@@ -1,7 +1,5 @@
# Kubo Changelogs
- [v0.41](docs/changelogs/v0.41.md)
- [v0.40](docs/changelogs/v0.40.md)
- [v0.39](docs/changelogs/v0.39.md)
- [v0.38](docs/changelogs/v0.38.md)
- [v0.37](docs/changelogs/v0.37.md)


@@ -1,10 +1,6 @@
# Contributing to Kubo
IPFS as a project, including go-ipfs and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
**For development setup, building, and testing, see the [Developer Guide](docs/developer-guide.md).**
IPFS as a project, including Kubo and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
We also adhere to the [Go IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information on how to collaborate and contribute to the Go implementation of IPFS.
We also adhere to the [GO IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information of how to collaborate and contribute in the Go implementation of IPFS.
We appreciate your time and attention for going over these. Please open an issue on ipfs/community if you have any questions.


@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# Enables BuildKit with cache mounts for faster builds
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.26 AS builder
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder
ARG TARGETOS TARGETARCH

README.md

@@ -2,7 +2,7 @@
<br>
<a href="https://github.com/ipfs/kubo/blob/master/docs/logo/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
<br>
Kubo: IPFS Implementation in Go
Kubo: IPFS Implementation in GO
<br>
</h1>
@@ -11,61 +11,111 @@
<p align="center">
<a href="https://ipfs.tech"><img src="https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square" alt="Official Part of IPFS Project"></a>
<a href="https://discuss.ipfs.tech"><img alt="Discourse Forum" src="https://img.shields.io/discourse/posts?server=https%3A%2F%2Fdiscuss.ipfs.tech"></a>
<a href="https://docs.ipfs.tech/community/"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://matrix.to/#/#ipfs-space:ipfs.io"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/gobuild.yml?branch=master"></a>
<a href="https://github.com/ipfs/kubo/releases"><img alt="GitHub release" src="https://img.shields.io/github/v/release/ipfs/kubo?filter=!*rc*"></a>
</p>
<hr />
<p align="center">
<b><a href="#what-is-kubo">What is Kubo?</a></b> | <b><a href="#quick-taste">Quick Taste</a></b> | <b><a href="#install">Install</a></b> | <b><a href="#documentation">Documentation</a></b> | <b><a href="#development">Development</a></b> | <b><a href="#getting-help">Getting Help</a></b>
</p>
## What is Kubo?
Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) implementation and is the [most widely used one today](https://probelab.io/ipfs/topology/#chart-agent-types-avg). It takes an opinionated approach to content-addressing ([CIDs](https://docs.ipfs.tech/concepts/glossary/#cid), [DAGs](https://docs.ipfs.tech/concepts/glossary/#dag)) that maximizes interoperability: [UnixFS](https://docs.ipfs.tech/concepts/glossary/#unixfs) for files and directories, [HTTP Gateways](https://docs.ipfs.tech/concepts/glossary/#gateway) for web browsers, [Bitswap](https://docs.ipfs.tech/concepts/glossary/#bitswap) and [HTTP](https://specs.ipfs.tech/http-gateways/trustless-gateway/) for verifiable data transfer.
Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the standard for content-addressing on the Web, interoperable with HTTP. Thus powered by future-proof data models and the libp2p for network communication. Kubo is written in Go.
**Features:**
Featureset
- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT
- Native support for UnixFS (most popular way to represent files and directories on IPFS)
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups
- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node
- [Content blocking](/docs/content-blocking.md) support for operators of public nodes
- Runs an IPFS node as a network service (LAN [mDNS](https://github.com/libp2p/specs/blob/master/discovery/mdns.md) and WAN [Amino DHT](https://docs.ipfs.tech/concepts/glossary/#dht))
- [Command-line interface](https://docs.ipfs.tech/reference/kubo/cli/) (`ipfs --help`)
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) for node management
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
- [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md)
- [Content blocking](./docs/content-blocking.md) for public node operators
### Other implementations
**Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/)
See [List](https://docs.ipfs.tech/basics/ipfs-implementations/)
## Quick Taste
## What is IPFS?
After [installing Kubo](#install), verify it works:
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from previous systems such as Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single BitTorrent swarm, exchanging git objects. IPFS provides an interface as simple as the HTTP web, but with permanence built-in. You can also mount the world at /ipfs.
```console
$ ipfs init
generating ED25519 keypair...done
peer identity: 12D3KooWGcSLQdLDBi2BvoP8WnpdHvhWPbxpGcqkf93rL2XMZK7R
For more info see: https://docs.ipfs.tech/concepts/what-is-ipfs/
$ ipfs daemon &
Daemon is ready
Before opening an issue, consider using one of the following locations to ensure you are opening your thread in the right place:
- kubo (previously named go-ipfs) _implementation_ bugs in [this repo](https://github.com/ipfs/kubo/issues).
- Documentation issues in [ipfs/docs issues](https://github.com/ipfs/ipfs-docs/issues).
- IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues).
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech).
- Or [chat with us](https://docs.ipfs.tech/community/chat/).
$ echo "hello IPFS" | ipfs add -q --cid-version 1
bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
[![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UCdjsUXJ3QawK4O5L1kqqsew?label=Subscribe%20IPFS&style=social&cacheSeconds=3600)](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [![Follow @IPFS on Twitter](https://img.shields.io/twitter/follow/IPFS?style=social&cacheSeconds=3600)](https://twitter.com/IPFS)
$ ipfs cat bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
hello IPFS
```
## Next milestones
Verify this CID is provided by your node to the IPFS network: <https://check.ipfs.network/?cid=bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa>
[Milestones on GitHub](https://github.com/ipfs/kubo/milestones)
See `ipfs add --help` for all import options. Ready for more? Follow the [command-line quick start](https://docs.ipfs.tech/how-to/command-line-quick-start/).
## Table of Contents
- [What is Kubo?](#what-is-kubo)
- [What is IPFS?](#what-is-ipfs)
- [Next milestones](#next-milestones)
- [Table of Contents](#table-of-contents)
- [Security Issues](#security-issues)
- [Install](#install)
- [Minimal System Requirements](#minimal-system-requirements)
- [Docker](#docker)
- [Official prebuilt binaries](#official-prebuilt-binaries)
- [Updating](#updating)
- [Downloading builds using IPFS](#downloading-builds-using-ipfs)
- [Unofficial Linux packages](#unofficial-linux-packages)
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Fedora](#fedora-copr)
- [Unofficial Windows packages](#unofficial-windows-packages)
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
- [Unofficial MacOS packages](#unofficial-macos-packages)
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
- [Build from Source](#build-from-source)
- [Install Go](#install-go)
- [Download and Compile IPFS](#download-and-compile-ipfs)
- [Cross Compiling](#cross-compiling)
- [Troubleshooting](#troubleshooting)
- [Getting Started](#getting-started)
- [Usage](#usage)
- [Some things to try](#some-things-to-try)
- [Troubleshooting](#troubleshooting-1)
- [Packages](#packages)
- [Development](#development)
- [Map of Implemented Subsystems](#map-of-implemented-subsystems)
- [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram)
- [Testing](#testing)
- [Development Dependencies](#development-dependencies)
- [Developer Notes](#developer-notes)
- [Maintainer Info](#maintainer-info)
- [Contributing](#contributing)
- [License](#license)
## Security Issues
Please follow [`SECURITY.md`](SECURITY.md).
## Install
Follow the [official installation guide](https://docs.ipfs.tech/install/command-line/), or choose: [prebuilt binary](#official-prebuilt-binaries) | [Docker](#docker) | [package manager](#package-managers) | [from source](#build-from-source).
The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development.
Prefer a GUI? Try [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and/or [IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/).
For production use, Release Docker images (below) are recommended.
### Minimal System Requirements
@@ -77,148 +127,388 @@ Kubo runs on most Linux, macOS, and Windows systems. For optimal performance, we
> [!CAUTION]
> Systems with less than the recommended memory may experience instability, frequent OOM errors or restarts, and missing data announcement (reprovider window), which can make data fully or partially inaccessible to other peers. Running Kubo on underprovisioned hardware is at your own risk.
### Official Prebuilt Binaries
Download from https://dist.ipfs.tech#kubo or [GitHub Releases](https://github.com/ipfs/kubo/releases/latest).
### Docker
Official images are published at https://hub.docker.com/r/ipfs/kubo/: [![Docker Image Version (latest semver)](https://img.shields.io/docker/v/ipfs/kubo?color=blue&label=kubo%20docker%20image&logo=docker&sort=semver&style=flat-square&cacheSeconds=3600)](https://hub.docker.com/r/ipfs/kubo/)
#### 🟢 Release Images
- These are production grade images. Use them.
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest). If you use this, remember to `docker pull` periodically to update.
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
Use these for production deployments.
#### 🟠 Developer Preview Images
- These tags are used by developers for internal testing, not intended for end users or production use.
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) always points at the `HEAD` of the [`master`](https://github.com/ipfs/kubo/commits/master/) branch
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit from the `master` branch
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest)
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
#### 🔴 Internal Staging Images
- We use `staging` for testing arbitrary commits and experimental patches.
- To build image for current HEAD, force push to `staging` via `git push origin HEAD:staging --force`)
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) always points at the `HEAD` of the [`staging`](https://github.com/ipfs/kubo/commits/staging/) branch
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit from the `staging` branch
```console
$ docker pull ipfs/kubo:latest
$ docker run --rm -it --net=host ipfs/kubo:latest
```
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), pass config via `-e` or mount scripts in `/container-init.d`.
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node),
pass necessary config via `-e` or by mounting scripts in the `/container-init.d`.
#### 🟠 Developer Preview Images
Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/
For internal testing, not intended for production.
### Official prebuilt binaries
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) points at `HEAD` of [`master`](https://github.com/ipfs/kubo/commits/master/)
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit
The official binaries are published at https://dist.ipfs.tech#kubo:
#### 🔴 Internal Staging Images
[![dist.ipfs.tech Downloads](https://img.shields.io/github/v/release/ipfs/kubo?label=dist.ipfs.tech&logo=ipfs&style=flat-square&cacheSeconds=3600)](https://dist.ipfs.tech#kubo)
For testing arbitrary commits and experimental patches (force push to `staging` branch).
From there:
- Click the blue "Download Kubo" on the right side of the page.
- Open/extract the archive.
- Move kubo (`ipfs`) to your path (`install.sh` can do it for you).
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) points at `HEAD` of [`staging`](https://github.com/ipfs/kubo/commits/staging/)
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit
If you are unable to access [dist.ipfs.tech](https://dist.ipfs.tech#kubo), you can also download kubo from:
- this project's GitHub [releases](https://github.com/ipfs/kubo/releases/latest) page
- `/ipns/dist.ipfs.tech` at [dweb.link](https://dweb.link/ipns/dist.ipfs.tech#kubo) gateway
#### Updating
##### Downloading builds using IPFS
List the available versions of Kubo implementation:
```console
$ ipfs cat /ipns/dist.ipfs.tech/kubo/versions
```
Then, to view available builds for a version from the previous command (`$VERSION`):
```console
$ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION
```
To download a given build of a version:
```console
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-arm64.tar.gz # darwin arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux amd64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-riscv64.tar.gz # linux riscv64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm64.tar.gz # linux arm64 build
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows amd64 build
```
### Unofficial Linux packages
<a href="https://repology.org/project/kubo/versions">
<img src="https://repology.org/badge/vertical-allrepos/kubo.svg" alt="Packaging status" align="right">
</a>
- [ArchLinux](#arch-linux)
- [Gentoo Linux](#gentoo-linux)
- [Nix](#nix-linux)
- [Solus](#solus)
- [openSUSE](#opensuse)
- [Guix](#guix)
- [Snap](#snap)
- [Ubuntu PPA](#ubuntu-ppa)
- [Fedora](#fedora-copr)
#### Arch Linux
[![kubo via Community Repo](https://img.shields.io/archlinux/v/community/x86_64/kubo?color=1793d1&label=kubo&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://wiki.archlinux.org/title/IPFS)
```bash
# pacman -S kubo
```
[![kubo-git via AUR](https://img.shields.io/static/v1?label=kubo-git&message=latest%40master&color=1793d1&logo=arch-linux&style=flat-square&cacheSeconds=3600)](https://archlinux.org/packages/kubo/)
#### <a name="gentoo-linux">Gentoo Linux</a>
https://wiki.gentoo.org/wiki/Kubo
```bash
# emerge -a net-p2p/kubo
```
https://packages.gentoo.org/packages/net-p2p/kubo
#### <a name="nix-linux">Nix</a>
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo like this:
```
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `kubo`.
#### Solus
[Package for Solus](https://dev.getsol.us/source/kubo/repository/master/)
```
$ sudo eopkg install kubo
```
You can also install it through the Solus software center.
#### openSUSE
[Community Package for kubo](https://software.opensuse.org/package/kubo)
#### Guix
[Community Package for kubo](https://packages.guix.gnu.org/search/?query=kubo) is available.
#### Snap
No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688).
#### Ubuntu PPA
[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad.
##### Latest Ubuntu (>= 20.04 LTS)
```sh
sudo add-apt-repository ppa:twdragon/ipfs
sudo apt update
sudo apt install ipfs-kubo
```
### Fedora COPR
[`taw00/ipfs-rpm`](https://github.com/taw00/ipfs-rpm)
##### Any Ubuntu version
```sh
sudo su
echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
exit
sudo apt update
sudo apt install ipfs-kubo
```
where `<<DISTRO>>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use.
**NOTE**: this method also may work with any compatible Debian-based distro which has `libc6` inside, and APT as a package manager.
### Unofficial Windows packages
- [Chocolatey](#chocolatey)
- [Scoop](#scoop)
#### Chocolatey
No longer supported, see rationale in [kubo#9341](https://github.com/ipfs/kubo/issues/9341).
#### Scoop
Scoop provides kubo as `kubo` in its 'extras' bucket.
```Powershell
PS> scoop bucket add extras
PS> scoop install kubo
```
### Unofficial macOS packages
- [MacPorts](#macports)
- [Nix](#nix-macos)
- [Homebrew](#homebrew)
#### MacPorts
The package [ipfs](https://ports.macports.org/port/ipfs) currently points to kubo and is being maintained.
```
$ sudo port install ipfs
```
#### <a name="nix-macos">Nix</a>
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
```
$ nix-env -i kubo
```
You can also install the Package by using its attribute name, which is also `kubo`.
#### Homebrew
A Homebrew formula [ipfs](https://formulae.brew.sh/formula/ipfs) is maintained too.
```
$ brew install --formula ipfs
```
### Build from Source
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600)
```bash
git clone https://github.com/ipfs/kubo.git
cd kubo
make build # creates cmd/ipfs/ipfs
make install # installs to $GOPATH/bin/ipfs
kubo's build system requires Go and some standard POSIX build tools:
* GNU make
* Git
* GCC (or some other go compatible C Compiler) (optional)
To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=0`).
#### Install Go
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/ipfs/kubo?label=Requires%20Go&logo=go&style=flat-square&cacheSeconds=3600)
If you need to update: [Download latest version of Go](https://golang.org/dl/).
You'll need to add Go's bin directories to your `$PATH` environment variable e.g., by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`:
```
export PATH=$PATH:/usr/local/go/bin
export PATH=$PATH:$GOPATH/bin
```
See the [Developer Guide](docs/developer-guide.md) for details, Windows instructions, and troubleshooting.
(If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)).
### Package Managers
#### Download and Compile IPFS
Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. See [Repology](https://repology.org/project/kubo/versions) for the full list: [![Packaging status](https://repology.org/badge/tiny-repos/kubo.svg)](https://repology.org/project/kubo/versions)
```
$ git clone https://github.com/ipfs/kubo.git
> [!WARNING]
> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source).
$ cd kubo
$ make install
```
#### Linux
Alternatively, you can run `make build` to build the kubo binary (storing it in `cmd/ipfs/ipfs`) without installing it.
### Package Managers

Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. See [Repology](https://repology.org/project/kubo/versions) for the full list: [![Packaging status](https://repology.org/badge/tiny-repos/kubo.svg)](https://repology.org/project/kubo/versions)

> [!WARNING]
> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source).

#### Linux

| Distribution | Install | Version |
|--------------|---------|---------|
| Ubuntu | [PPA](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs): `sudo apt install ipfs-kubo` | [![PPA: twdragon](https://img.shields.io/badge/PPA-twdragon-E95420?logo=ubuntu)](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) |
| Arch | `pacman -S kubo` | [![Arch package](https://repology.org/badge/version-for-repo/arch/kubo.svg)](https://archlinux.org/packages/extra/x86_64/kubo/) |
| Fedora | [COPR](https://copr.fedorainfracloud.org/coprs/taw/ipfs/): `dnf install kubo` | [![COPR: taw](https://img.shields.io/badge/COPR-taw-51A2DA?logo=fedora)](https://copr.fedorainfracloud.org/coprs/taw/ipfs/) |
| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) |
| Gentoo | `emerge -a net-p2p/kubo` | [![Gentoo package](https://repology.org/badge/version-for-repo/gentoo/kubo.svg)](https://packages.gentoo.org/packages/net-p2p/kubo) |
| openSUSE | `zypper install kubo` | [![openSUSE Tumbleweed](https://repology.org/badge/version-for-repo/opensuse_tumbleweed/kubo.svg)](https://software.opensuse.org/package/kubo) |
| Solus | `sudo eopkg install kubo` | [![Solus package](https://repology.org/badge/version-for-repo/solus/kubo.svg)](https://packages.getsol.us/shannon/k/kubo/) |
| Guix | `guix install kubo` | [![Guix package](https://repology.org/badge/version-for-repo/gnuguix/kubo.svg)](https://packages.guix.gnu.org/packages/kubo/) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |

~~Snap~~ no longer supported ([#8688](https://github.com/ipfs/kubo/issues/8688))
#### macOS

| Manager | Install | Version |
|---------|---------|---------|
| Homebrew | `brew install ipfs` | [![Homebrew](https://repology.org/badge/version-for-repo/homebrew/kubo.svg)](https://formulae.brew.sh/formula/ipfs) |
| MacPorts | `sudo port install ipfs` | [![MacPorts](https://repology.org/badge/version-for-repo/macports/kubo.svg)](https://ports.macports.org/port/ipfs/) |
| Nix | `nix-env -i kubo` | [![nixpkgs unstable](https://repology.org/badge/version-for-repo/nix_unstable/kubo.svg)](https://search.nixos.org/packages?query=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
#### Windows

| Manager | Install | Version |
|---------|---------|---------|
| Scoop | `scoop install kubo` | [![Scoop](https://repology.org/badge/version-for-repo/scoop/kubo.svg)](https://scoop.sh/#/apps?q=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |

~~Chocolatey~~ no longer supported ([#9341](https://github.com/ipfs/kubo/issues/9341))
## Documentation

| Topic | Description |
|-------|-------------|
| [Configuration](docs/config.md) | All config options reference |
| [Environment variables](docs/environment-variables.md) | Runtime settings via env vars |
| [Experimental features](docs/experimental-features.md) | Opt-in features in development |
| [HTTP Gateway](docs/gateway.md) | Path, subdomain, and trustless gateway setup |
| [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS |
| [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing |
| [Metrics & monitoring](docs/metrics.md) | Prometheus metrics |
| [Content blocking](docs/content-blocking.md) | Denylist for public nodes |
| [Customizing](docs/customizing.md) | Unsure whether to use plugins, Boxo, or a fork? |
| [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing |
| [Changelogs](docs/changelogs/) | Release notes for each version |
| [All documentation](https://github.com/ipfs/kubo/tree/master/docs) | Full list of docs |

## Getting Started

### Usage

[![docs: Command-line quick start](https://img.shields.io/static/v1?label=docs&message=Command-line%20quick%20start&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/how-to/command-line-quick-start/)
[![docs: Command-line reference](https://img.shields.io/static/v1?label=docs&message=Command-line%20reference&color=blue&style=flat-square&cacheSeconds=3600)](https://docs.ipfs.tech/reference/kubo/cli/)

To start using IPFS, you must first initialize IPFS's config files on your system; this is done with `ipfs init`. See `ipfs init --help` for information on the optional arguments it takes. After initialization is complete, you can use `ipfs mount`, `ipfs add`, and any of the other commands to explore!

For detailed configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
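A minimal first session might look like this (a sketch; the daemon runs in the foreground until interrupted):
```bash
ipfs init   # one-time setup of the local repo (~/.ipfs by default)
ipfs daemon # start the node; press Ctrl-C to stop
```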
### Some things to try
Basic proof of 'ipfs working' locally:

```
echo "hello world" > hello
ipfs add hello
# This should output a hash string that looks something like:
# QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o
ipfs cat <that hash>
```
### HTTP/RPC clients
For programmatic interaction with Kubo, see our [list of HTTP/RPC clients](docs/http-rpc-clients.md).
### Troubleshooting
If you have previously installed IPFS and are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (`~/.ipfs` by default) and rerunning `ipfs init`. This reinitializes the config file to its defaults and clears the local datastore of any bad entries.
For more information about configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
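A sketch of that reset (the backup name is illustrative):
```bash
mv ~/.ipfs ~/.ipfs.bak # keep the old repo around, just in case
ipfs init
```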
Please direct general questions and help requests to our [forums](https://discuss.ipfs.tech).
If you believe you've found a bug, check the [issues list](https://github.com/ipfs/kubo/issues) and, if you don't see your problem there, either come talk to us on [Matrix chat](https://docs.ipfs.tech/community/chat/), or file an issue of your own!
## Packages
See the [IPFS in Go](https://docs.ipfs.tech/reference/go/api/) documentation.
## Development

See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow. AI coding agents should follow [AGENTS.md](AGENTS.md).

Some places to get you started on the codebase:

- Main file: [./cmd/ipfs/main.go](https://github.com/ipfs/kubo/blob/master/cmd/ipfs/main.go)
- CLI Commands: [./core/commands/](https://github.com/ipfs/kubo/tree/master/core/commands)
- Bitswap (the data trading engine): [go-bitswap](https://github.com/ipfs/go-bitswap)
- libp2p
  - libp2p: https://github.com/libp2p/go-libp2p
  - DHT: https://github.com/libp2p/go-libp2p-kad-dht
- [IPFS: The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md)

### Map of Implemented Subsystems

**WIP**: This is a high-level architecture diagram of the various sub-systems of this specific implementation, to be updated with how they interact. Anyone who has suggestions is welcome to comment [here](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit) on how we can improve this!

<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&amp;h=1036">
### CLI, HTTP-API, Architecture Diagram

![](./docs/cli-http-api-core-diagram.png)

> [Origin](https://github.com/ipfs/pm/pull/678#discussion_r210410924)

Description: Dotted means "likely going away". The "Legacy" parts are thin wrappers around some commands to translate between the new system and the old system. The grayed-out parts on the "daemon" diagram show that the code is all the same; some pieces are simply turned on or off depending on whether we're running on the client or the server.
### Testing
```
make test
```
### Development Dependencies
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
### Developer Notes
Find more documentation for developers in [docs](./docs).
## Getting Help

- [IPFS Forum](https://discuss.ipfs.tech) - community support, questions, and discussion
- [Community](https://docs.ipfs.tech/community/) - chat, events, and working groups
- [GitHub Issues](https://github.com/ipfs/kubo/issues) - bug reports for Kubo specifically
- [IPFS Docs Issues](https://github.com/ipfs/ipfs-docs/issues) - documentation issues

Members of the IPFS community provide Kubo support in the [Kubo help category of the discussion forums](https://discuss.ipfs.tech/c/help/help-kubo/23). Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help.

## Security Issues

See [`SECURITY.md`](SECURITY.md).
## Contributing

[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)

We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md). We ❤️ all [our contributors](docs/AUTHORS); this project wouldn't be what it is without you.

This repository follows the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
## Maintainer Info

<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>

Kubo is maintained by the [Shipyard](https://ipshipyard.com/) team.

* This repository is part of [Shipyard's Go Triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a) of Boxo, Kubo, and Rainbow.
* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
## License

This project is dual-licensed under Apache 2.0 and MIT terms:

- Apache License, Version 2.0 ([LICENSE-APACHE](https://github.com/ipfs/kubo/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](https://github.com/ipfs/kubo/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)

View File

@ -134,14 +134,15 @@ help:
@echo ''
@echo 'TESTING TARGETS:'
@echo ''
@echo ' test - Run all tests (test_go_fmt, test_unit, test_cli, test_sharness)'
@echo ' test_short - Run fast tests (test_go_fmt, test_unit)'
@echo ' test_unit - Run unit tests with coverage (excludes test/cli)'
@echo ' test_cli - Run CLI integration tests (requires built binary)'
@echo ' test_go_fmt - Check Go source formatting'
@echo ' test - Run all tests'
@echo ' test_short - Run short go tests and short sharness tests'
@echo ' test_go_short - Run short go tests'
@echo ' test_go_test - Run all go tests'
@echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
@echo ' test_go_lint - Run golangci-lint'
@echo ' test_go_expensive - Run all go tests and build all platforms'
@echo ' test_go_race - Run go tests with the race detector enabled'
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
@echo ' test_sharness - Run sharness tests'
@echo ' coverage - Collect coverage info from unit tests and sharness'
@echo ' coverage - Collects coverage info from unit tests and sharness'
@echo
.PHONY: help

View File

@ -50,6 +50,6 @@ else
unset IPFS_SWARM_KEY_FILE
fi
find /container-init.d -maxdepth 1 \( -type f -o -type l \) -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
find /container-init.d -maxdepth 1 -type f -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
exec ipfs "$@"
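The hook above runs every `*.sh` (now including symlinks) under `/container-init.d` before the daemon starts. A sketch of using it from a host (file name and config tweak are illustrative):
```bash
mkdir -p container-init.d
cat > container-init.d/001-config.sh <<'EOF'
#!/bin/sh
ipfs config --json Gateway.NoFetch true
EOF
docker run -d -v "$(pwd)/container-init.d:/container-init.d" ipfs/kubo
```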

View File

@ -41,7 +41,7 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echoImageName "$GIT_TAG"
echoImageName "latest"
echoImageName "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981
echoImageName "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
# sanitize the branch name since docker tags have stricter char limits than git branch names

View File

@ -1,19 +1,10 @@
#!/bin/bash
#!/bin/zsh
#
# Invocation: mkreleaselog [FIRST_REF [LAST_REF]]
#
# Generates release notes with contributor statistics, deduplicating by GitHub handle.
# GitHub handles are resolved from:
# 1. GitHub noreply emails (user@users.noreply.github.com)
# 2. Merge commit messages (Merge pull request #N from user/branch)
# 3. GitHub API via gh CLI (for squash merges)
#
# Results are cached in ~/.cache/mkreleaselog/github-handles.json
set -euo pipefail
export GO111MODULE=on
GOPATH="$(go env GOPATH)"
export GOPATH
export GOPATH="$(go env GOPATH)"
# List of PCRE regular expressions to match "included" modules.
INCLUDE_MODULES=(
@ -24,15 +15,10 @@ INCLUDE_MODULES=(
"^github.com/multiformats/"
"^github.com/filecoin-project/"
"^github.com/ipfs-shipyard/"
"^github.com/ipshipyard/"
"^github.com/probe-lab/"
# Authors of personal modules used by go-ipfs that should be mentioned in the
# release notes.
"^github.com/whyrusleeping/"
"^github.com/gammazero/"
"^github.com/Jorropo/"
"^github.com/guillaumemichel/"
"^github.com/Kubuxu/"
"^github.com/jbenet/"
"^github.com/Stebalien/"
@ -62,348 +48,15 @@ IGNORE_FILES=(
)
##########################################################################################
# GitHub Handle Resolution Infrastructure
##########################################################################################
# Cache location following XDG spec
GITHUB_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/mkreleaselog"
GITHUB_CACHE_FILE="$GITHUB_CACHE_DIR/github-handles.json"
# Timeout for gh CLI commands (seconds)
GH_TIMEOUT=10
# Associative array for email -> github handle mapping (runtime cache)
declare -A EMAIL_TO_GITHUB
# Check if gh CLI is available and authenticated
gh_available() {
command -v gh >/dev/null 2>&1 && gh auth status >/dev/null 2>&1
}
# Load cached email -> github handle mappings from disk
load_github_cache() {
EMAIL_TO_GITHUB=()
if [[ ! -f "$GITHUB_CACHE_FILE" ]]; then
return 0
fi
# Validate JSON before loading
if ! jq -e '.' "$GITHUB_CACHE_FILE" >/dev/null 2>&1; then
msg "Warning: corrupted cache file, ignoring"
return 0
fi
local email handle
while IFS=$'\t' read -r email handle; do
# Validate handle format (alphanumeric, hyphens, max 39 chars)
if [[ -n "$email" && -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
EMAIL_TO_GITHUB["$email"]="$handle"
fi
done < <(jq -r 'to_entries[] | "\(.key)\t\(.value)"' "$GITHUB_CACHE_FILE" 2>/dev/null)
msg "Loaded ${#EMAIL_TO_GITHUB[@]} cached GitHub handle mappings"
}
# Save email -> github handle mappings to disk (atomic write)
save_github_cache() {
if [[ ${#EMAIL_TO_GITHUB[@]} -eq 0 ]]; then
return 0
fi
mkdir -p "$GITHUB_CACHE_DIR"
local tmp_file
tmp_file="$(mktemp "$GITHUB_CACHE_DIR/cache.XXXXXX")" || return 1
# Build JSON from associative array
{
echo "{"
local first=true
local key
for key in "${!EMAIL_TO_GITHUB[@]}"; do
if [[ "$first" == "true" ]]; then
first=false
else
echo ","
fi
# Escape special characters in email for JSON
printf ' %s: %s' "$(jq -n --arg e "$key" '$e')" "$(jq -n --arg h "${EMAIL_TO_GITHUB[$key]}" '$h')"
done
echo
echo "}"
} > "$tmp_file"
# Validate before replacing
if jq -e '.' "$tmp_file" >/dev/null 2>&1; then
mv "$tmp_file" "$GITHUB_CACHE_FILE"
msg "Saved ${#EMAIL_TO_GITHUB[@]} GitHub handle mappings to cache"
else
rm -f "$tmp_file"
msg "Warning: failed to save cache (invalid JSON)"
fi
}
# Extract GitHub handle from email if it's a GitHub noreply address
# Handles: user@users.noreply.github.com and 12345678+user@users.noreply.github.com
extract_handle_from_noreply() {
local email="$1"
if [[ "$email" =~ ^([0-9]+\+)?([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)@users\.noreply\.github\.com$ ]]; then
echo "${BASH_REMATCH[2]}"
return 0
fi
return 1
}
# Extract GitHub handle from merge commit subject
# Handles: "Merge pull request #123 from username/branch"
extract_handle_from_merge_commit() {
local subject="$1"
if [[ "$subject" =~ ^Merge\ pull\ request\ \#[0-9]+\ from\ ([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)/.*$ ]]; then
echo "${BASH_REMATCH[1]}"
return 0
fi
return 1
}
# Extract PR number from commit subject
# Handles: "Subject (#123)" and "Merge pull request #123 from"
extract_pr_number() {
local subject="$1"
if [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
echo "${BASH_REMATCH[1]}"
return 0
elif [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
echo "${BASH_REMATCH[1]}"
return 0
fi
return 1
}
# Query GitHub API for PR author (with timeout and error handling)
query_pr_author() {
local gh_repo="$1" # e.g., "ipfs/kubo"
local pr_num="$2"
if ! gh_available; then
return 1
fi
local handle
handle="$(timeout "$GH_TIMEOUT" gh pr view "$pr_num" --repo "$gh_repo" --json author -q '.author.login' 2>/dev/null)" || return 1
# Validate handle format
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
echo "$handle"
return 0
fi
return 1
}
# Query GitHub API for commit author (fallback when no PR available)
query_commit_author() {
local gh_repo="$1" # e.g., "ipfs/kubo"
local commit_sha="$2"
if ! gh_available; then
return 1
fi
local handle
handle="$(timeout "$GH_TIMEOUT" gh api "/repos/$gh_repo/commits/$commit_sha" --jq '.author.login // empty' 2>/dev/null)" || return 1
# Validate handle format
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
echo "$handle"
return 0
fi
return 1
}
# Resolve email to GitHub handle using all available methods
# Args: email, commit_hash (optional), repo_dir (optional), gh_repo (optional)
resolve_github_handle() {
local email="$1"
local commit="${2:-}"
local repo_dir="${3:-}"
local gh_repo="${4:-}"
# Skip empty emails
[[ -z "$email" ]] && return 1
# Check runtime cache first
if [[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]]; then
echo "${EMAIL_TO_GITHUB[$email]}"
return 0
fi
local handle=""
# Method 1: Extract from noreply email
if handle="$(extract_handle_from_noreply "$email")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
echo "$handle"
return 0
fi
# Method 2: Look at commit message for merge commit pattern
if [[ -n "$commit" && -n "$repo_dir" ]]; then
local subject
subject="$(git -C "$repo_dir" log -1 --format='%s' "$commit" 2>/dev/null)" || true
if [[ -n "$subject" ]]; then
if handle="$(extract_handle_from_merge_commit "$subject")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
echo "$handle"
return 0
fi
# Method 3: Query GitHub API for PR author
if [[ -n "$gh_repo" ]]; then
local pr_num
if pr_num="$(extract_pr_number "$subject")"; then
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
echo "$handle"
return 0
fi
fi
fi
fi
fi
return 1
}
# Build GitHub handle mappings for all commits in a range
# This does a single pass to collect PR numbers, then batch queries them
build_github_mappings() {
local module="$1"
local start="$2"
local end="${3:-HEAD}"
local repo
repo="$(strip_version "$module")"
local dir
local gh_repo=""
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
dir="$ROOT_DIR"
else
dir="$GOPATH/src/$repo"
fi
# Extract gh_repo for API calls (e.g., "ipfs/kubo" from "github.com/ipfs/kubo")
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
gh_repo="${BASH_REMATCH[1]}"
fi
msg "Building GitHub handle mappings for $module..."
# Collect all unique emails and their commit context
declare -A email_commits=()
local hash email subject
while IFS=$'\t' read -r hash email subject; do
[[ -z "$email" ]] && continue
# Skip if already resolved
[[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]] && continue
# Try to resolve without API first
local handle=""
# Method 1: noreply email
if handle="$(extract_handle_from_noreply "$email")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
continue
fi
# Method 2: merge commit message
if handle="$(extract_handle_from_merge_commit "$subject")"; then
EMAIL_TO_GITHUB["$email"]="$handle"
continue
fi
# Store for potential API lookup
if [[ -z "${email_commits[$email]:-}" ]]; then
email_commits["$email"]="$hash"
fi
done < <(git -C "$dir" log --format='tformat:%H%x09%aE%x09%s' --no-merges "$start..$end" 2>/dev/null)
# API batch lookup for remaining emails (if gh is available)
if gh_available && [[ -n "$gh_repo" && ${#email_commits[@]} -gt 0 ]]; then
msg "Querying GitHub API for ${#email_commits[@]} unknown contributors..."
local key
for key in "${!email_commits[@]}"; do
# Skip if already resolved
[[ -n "${EMAIL_TO_GITHUB[$key]:-}" ]] && continue
local commit_hash="${email_commits[$key]}"
local subj handle
subj="$(git -C "$dir" log -1 --format='%s' "$commit_hash" 2>/dev/null)" || true
# Try PR author lookup first (cheaper API call)
local pr_num
if pr_num="$(extract_pr_number "$subj")"; then
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
EMAIL_TO_GITHUB["$key"]="$handle"
continue
fi
fi
# Fallback: commit author API (works for any commit)
if handle="$(query_commit_author "$gh_repo" "$commit_hash")"; then
EMAIL_TO_GITHUB["$key"]="$handle"
fi
done
fi
}
##########################################################################################
# Original infrastructure with modifications
##########################################################################################
build_include_regex() {
local result=""
local mod
for mod in "${INCLUDE_MODULES[@]}"; do
if [[ -n "$result" ]]; then
result="$result|$mod"
else
result="$mod"
fi
done
echo "($result)"
}
build_exclude_regex() {
local result=""
local mod
for mod in "${EXCLUDE_MODULES[@]}"; do
if [[ -n "$result" ]]; then
result="$result|$mod"
else
result="$mod"
fi
done
if [[ -n "$result" ]]; then
echo "($result)"
else
echo '$^' # match nothing
fi
}
if [[ ${#INCLUDE_MODULES[@]} -gt 0 ]]; then
INCLUDE_REGEX="$(build_include_regex)"
INCLUDE_REGEX="(${$(printf "|%s" "${INCLUDE_MODULES[@]}"):1})"
else
INCLUDE_REGEX="" # "match anything"
fi
if [[ ${#EXCLUDE_MODULES[@]} -gt 0 ]]; then
EXCLUDE_REGEX="$(build_exclude_regex)"
EXCLUDE_REGEX="(${$(printf "|%s" "${EXCLUDE_MODULES[@]}"):1})"
else
EXCLUDE_REGEX='$^' # "match nothing"
fi
@ -418,6 +71,8 @@ NL=$'\n'
ROOT_DIR="$(git rev-parse --show-toplevel)"
alias jq="jq --unbuffered"
msg() {
echo "$*" >&2
}
@ -425,21 +80,11 @@ msg() {
statlog() {
local module="$1"
local rpath
local gh_repo=""
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
rpath="$ROOT_DIR"
else
rpath="$GOPATH/src/$(strip_version "$module")"
fi
# Extract gh_repo for API calls
local repo
repo="$(strip_version "$module")"
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
gh_repo="${BASH_REMATCH[1]}"
fi
local start="${2:-}"
local end="${3:-HEAD}"
local mailmap_file="$rpath/.mailmap"
@ -448,21 +93,18 @@ statlog() {
fi
local stack=()
local line
while read -r line; do
git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}" | while read -r line; do
if [[ -n "$line" ]]; then
stack+=("$line")
continue
fi
local changes
read -r changes
local changed=0
local insertions=0
local deletions=0
local count event
while read -r count event; do
changed=0
insertions=0
deletions=0
while read count event; do
if [[ "$event" =~ ^file ]]; then
changed=$count
elif [[ "$event" =~ ^insertion ]]; then
@ -475,32 +117,27 @@ statlog() {
fi
done<<<"${changes//,/$NL}"
local author
for author in "${stack[@]}"; do
local hash name email
IFS=$'\t' read -r hash name email <<<"$author"
# Resolve GitHub handle
local github_handle=""
github_handle="$(resolve_github_handle "$email" "$hash" "$rpath" "$gh_repo")" || true
jq -n \
--arg "hash" "$hash" \
--arg "name" "$name" \
--arg "email" "$email" \
--arg "github" "$github_handle" \
--argjson "changed" "$changed" \
--argjson "insertions" "$insertions" \
--argjson "deletions" "$deletions" \
'{Commit: $hash, Author: $name, Email: $email, GitHub: $github, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
'{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
done
stack=()
done < <(git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}")
done
}
# Returns a stream of deps changed between $1 and $2.
dep_changes() {
cat "$1" "$2" | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
{
<"$1"
<"$2"
} | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
}
# resolve_commits resolves a git ref for each version.
@ -528,11 +165,12 @@ ignored_commit() {
# Generate a release log for a range of commits in a single repo.
release_log() {
setopt local_options BASH_REMATCH
local module="$1"
local start="$2"
local end="${3:-HEAD}"
local repo
repo="$(strip_version "$1")"
local repo="$(strip_version "$1")"
local dir
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
dir="$ROOT_DIR"
@ -540,25 +178,28 @@ release_log() {
dir="$GOPATH/src/$repo"
fi
local commit subject
while read -r commit subject; do
# Skip commits that only touch ignored files.
if ignored_commit "$dir" "$commit"; then
continue
fi
local commit pr
git -C "$dir" log \
--format='tformat:%H %s' \
--first-parent \
"$start..$end" |
while read commit subject; do
# Skip commits that only touch ignored files.
if ignored_commit "$dir" "$commit"; then
continue
fi
if [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
local prnum="${BASH_REMATCH[1]}"
local desc
desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
elif [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
local prnum="${BASH_REMATCH[1]}"
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
else
printf -- "- %s\n" "$subject"
fi
done < <(git -C "$dir" log --format='tformat:%H %s' --first-parent "$start..$end")
if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then
local prnum="${BASH_REMATCH[2]}"
local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then
local prnum="${BASH_REMATCH[2]}"
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
else
printf -- "- %s\n" "$subject"
fi
done
}
indent() {
@ -570,8 +211,7 @@ mod_deps() {
}
ensure() {
local repo
repo="$(strip_version "$1")"
local repo="$(strip_version "$1")"
local commit="$2"
local rpath
if [[ "$1" == "github.com/ipfs/kubo" ]]; then
@ -592,27 +232,14 @@ ensure() {
git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1
}
# Summarize stats, grouping by GitHub handle (with fallback to email for dedup)
statsummary() {
jq -s '
# Group by GitHub handle if available, otherwise by email
group_by(if .GitHub != "" then .GitHub else .Email end)[] |
{
# Use first non-empty GitHub handle, or fall back to Author name
Author: .[0].Author,
GitHub: (map(select(.GitHub != "")) | .[0].GitHub // ""),
Email: .[0].Email,
Commits: (. | length),
Insertions: (map(.Insertions) | add),
Deletions: (map(.Deletions) | add),
Files: (map(.Files) | add)
}
' | jq '. + {Lines: (.Deletions + .Insertions)}'
jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' |
jq '. + {Lines: (.Deletions + .Insertions)}'
}
strip_version() {
local repo="$1"
if [[ "$repo" =~ .*/v[0-9]+$ ]]; then
if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then
repo="$(dirname "$repo")"
fi
echo "$repo"
@ -621,24 +248,16 @@ strip_version() {
recursive_release_log() {
local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}"
local end="${2:-$(git rev-parse HEAD)}"
local repo_root
repo_root="$(git rev-parse --show-toplevel)"
local module
module="$(go list -m)"
local dir
dir="$(go list -m -f '{{.Dir}}')"
# Load cached GitHub handle mappings
load_github_cache
local repo_root="$(git rev-parse --show-toplevel)"
local module="$(go list -m)"
local dir="$(go list -m -f '{{.Dir}}')"
# Kubo can be run from any directory, dependencies still use GOPATH
(
local result=0
local workspace
workspace="$(mktemp -d)"
# shellcheck disable=SC2064
trap "rm -rf '$workspace'" INT TERM EXIT
local workspace="$(mktemp -d)"
trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT
cd "$workspace"
echo "Computing old deps..." >&2
@ -653,9 +272,6 @@ recursive_release_log() {
printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2
# Pre-build GitHub mappings for main module
build_github_mappings "$module" "$start" "$end"
echo "### 📝 Changelog"
echo
echo "<details><summary>Full Changelog</summary>"
@ -666,26 +282,24 @@ recursive_release_log() {
statlog "$module" "$start" "$end" > statlog.json
local dep_module new new_ref old old_ref
while read -r dep_module new new_ref old old_ref; do
if ! ensure "$dep_module" "$new_ref"; then
result=1
local changelog="failed to fetch repo"
else
# Pre-build GitHub mappings for dependency
build_github_mappings "$dep_module" "$old_ref" "$new_ref"
statlog "$dep_module" "$old_ref" "$new_ref" >> statlog.json
local changelog
changelog="$(release_log "$dep_module" "$old_ref" "$new_ref")"
fi
if [[ -n "$changelog" ]]; then
printf -- "- %s (%s -> %s):\n" "$dep_module" "$old" "$new"
echo "$changelog" | indent
fi
done < <(dep_changes old_deps.json new_deps.json |
dep_changes old_deps.json new_deps.json |
jq --arg inc "$INCLUDE_REGEX" --arg exc "$EXCLUDE_REGEX" \
'select(.Path | test($inc)) | select(.Path | test($exc) | not)' |
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"')
# Compute changelogs
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' |
while read module new new_ref old old_ref; do
if ! ensure "$module" "$new_ref"; then
result=1
local changelog="failed to fetch repo"
else
statlog "$module" "$old_ref" "$new_ref" >> statlog.json
local changelog="$(release_log "$module" "$old_ref" "$new_ref")"
fi
if [[ -n "$changelog" ]]; then
printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new"
echo "$changelog" | indent
fi
done
echo
echo "</details>"
@ -697,18 +311,8 @@ recursive_release_log() {
echo "|-------------|---------|---------|---------------|"
statsummary <statlog.json |
jq -s 'sort_by(.Lines) | reverse | .[]' |
jq -r '
if .GitHub != "" then
"| [@\(.GitHub)](https://github.com/\(.GitHub)) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
else
"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
end
'
# Save cache before exiting
save_github_cache
return "$result"
jq -r '"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"'
return "$status"
)
}

View File

@ -7,7 +7,7 @@
# Run from ci to tag images based on the current branch or tag name.
# A bit like dockerhub autobuild config, but somewhere we can version control it.
#
# The `docker-build` job builds the current commit in docker and tags it as ipfs/kubo:wip
# The `docker-build` job builds the current commit in docker and tags it as ipfs/go-ipfs:wip
#
# Then the `docker-publish` job runs this script to decide what tag, if any,
# to publish to dockerhub.
@ -42,7 +42,7 @@ GIT_TAG=${4:-$(git describe --tags --exact-match || echo "")}
DRY_RUN=${5:-false}
WIP_IMAGE_TAG=${WIP_IMAGE_TAG:-wip}
IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo}
IMAGE_NAME=${IMAGE_NAME:-ipfs/go-ipfs}
pushTag () {
local IMAGE_TAG=$1
@ -63,7 +63,7 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
pushTag "$GIT_TAG"
pushTag "latest"
pushTag "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981
pushTag "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
# sanitize the branch name since docker tags have stricter char limits than git branch names

View File

@ -34,10 +34,10 @@ type RmBlocksOpts struct {
// It returns a channel where objects of type RemovedBlock are placed, when
// not using the Quiet option. Block removal is asynchronous and will
// skip any pinned blocks.
func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids []cid.Cid, opts RmBlocksOpts) (<-chan any, error) {
func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids []cid.Cid, opts RmBlocksOpts) (<-chan interface{}, error) {
// make the channel large enough to hold any result to avoid
// blocking while holding the GCLock
out := make(chan any, len(cids))
out := make(chan interface{}, len(cids))
go func() {
defer close(out)
@ -75,7 +75,7 @@ func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids
// out channel, with an error which indicates that the Cid is pinned.
// This function is used in RmBlocks to filter out any blocks which are not
// to be removed (because they are pinned).
func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- any, cids []cid.Cid) []cid.Cid {
func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- interface{}, cids []cid.Cid) []cid.Cid {
stillOkay := make([]cid.Cid, 0, len(cids))
res, err := pins.CheckIfPinned(ctx, cids...)
if err != nil {

View File

@ -101,7 +101,7 @@ func (api *KeyAPI) List(ctx context.Context) ([]iface.Key, error) {
var out struct {
Keys []keyOutput
}
if err := api.core().Request("key/ls").Exec(ctx, &out); err != nil {
if err := api.core().Request("key/list").Exec(ctx, &out); err != nil {
return nil, err
}

View File

@ -18,10 +18,10 @@ type RequestBuilder interface {
BodyBytes(body []byte) RequestBuilder
Body(body io.Reader) RequestBuilder
FileBody(body io.Reader) RequestBuilder
Option(key string, value any) RequestBuilder
Option(key string, value interface{}) RequestBuilder
Header(name, value string) RequestBuilder
Send(ctx context.Context) (*Response, error)
Exec(ctx context.Context, res any) error
Exec(ctx context.Context, res interface{}) error
}
// encodedAbsolutePathVersion is the version from which the absolute path header in
@ -83,7 +83,7 @@ func (r *requestBuilder) FileBody(body io.Reader) RequestBuilder {
}
// Option sets the given option.
func (r *requestBuilder) Option(key string, value any) RequestBuilder {
func (r *requestBuilder) Option(key string, value interface{}) RequestBuilder {
var s string
switch v := value.(type) {
case bool:
@ -128,7 +128,7 @@ func (r *requestBuilder) Send(ctx context.Context) (*Response, error) {
}
// Exec sends the request a request and decodes the response.
func (r *requestBuilder) Exec(ctx context.Context, res any) error {
func (r *requestBuilder) Exec(ctx context.Context, res interface{}) error {
httpRes, err := r.Send(ctx)
if err != nil {
return err

View File

@ -64,7 +64,7 @@ func (r *Response) Cancel() error {
}
// Decode reads request body and decodes it as json.
func (r *Response) decode(dec any) error {
func (r *Response) decode(dec interface{}) error {
if r.Error != nil {
return r.Error
}

View File

@ -181,8 +181,8 @@ Headers.
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").WithDefault(true),
cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."),
cmds.BoolOption(enablePubSubKwd, "DEPRECATED CLI flag. Use Pubsub.Enabled config instead."),
cmds.BoolOption(enableIPNSPubSubKwd, "DEPRECATED CLI flag. Use Ipns.UsePubsub config instead."),
cmds.BoolOption(enablePubSubKwd, "DEPRECATED"),
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS over pubsub. Implicitly enables pubsub, overrides Ipns.UsePubsub config."),
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and exposed via libp2p identify protocol."),
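A sketch of the config-based replacements named in the deprecated flag descriptions above:
```bash
ipfs config --json Pubsub.Enabled true
ipfs config --json Ipns.UsePubsub true
```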
@ -397,14 +397,10 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID)
if psSet {
log.Error("The --enable-pubsub-experiment flag is deprecated. Use Pubsub.Enabled config option instead.")
} else {
if !psSet {
pubsub = cfg.Pubsub.Enabled.WithDefault(false)
}
if ipnsPsSet {
log.Error("The --enable-namesys-pubsub flag is deprecated. Use Ipns.UsePubsub config option instead.")
} else {
if !ipnsPsSet {
ipnsps = cfg.Ipns.UsePubsub.WithDefault(false)
}
@ -519,7 +515,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
}
//nolint:staticcheck // intentionally checking deprecated fields
if !cfg.Reprovider.Interval.IsDefault() || !cfg.Reprovider.Strategy.IsDefault() {
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.DHT.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
}
// Check for deprecated "flat" strategy (should have been migrated to "all")
if cfg.Provide.Strategy.WithDefault("") == "flat" {
@ -887,38 +883,23 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
}
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
errc := make(chan error, len(listeners))
var wg sync.WaitGroup
// Start all servers and wait for them to be ready before writing api file.
// This prevents race conditions where external tools (like systemd path units)
// see the file and try to connect before servers can accept connections.
if len(listeners) > 0 {
readyChannels := make([]chan struct{}, len(listeners))
for i, lis := range listeners {
readyChannels[i] = make(chan struct{})
ready := readyChannels[i]
wg.Go(func() {
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
})
}
// Wait for all listeners to be ready or any to fail
for _, ready := range readyChannels {
select {
case <-ready:
// This listener is ready
case err := <-errc:
return nil, fmt.Errorf("serveHTTPApi: %w", err)
}
}
// Only add an api file if the API is running.
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
}
}
errc := make(chan error)
var wg sync.WaitGroup
for _, apiLis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(apiLis)
}
go func() {
wg.Wait()
close(errc)
@ -1077,42 +1058,26 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err)
}
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
errc := make(chan error, len(listeners))
var wg sync.WaitGroup
// Start all servers and wait for them to be ready before writing gateway file.
// This prevents race conditions where external tools (like systemd path units)
// see the file and try to connect before servers can accept connections.
if len(listeners) > 0 {
readyChannels := make([]chan struct{}, len(listeners))
for i, lis := range listeners {
readyChannels[i] = make(chan struct{})
ready := readyChannels[i]
wg.Go(func() {
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
})
}
// Wait for all listeners to be ready or any to fail
for _, ready := range readyChannels {
select {
case <-ready:
// This listener is ready
case err := <-errc:
return nil, fmt.Errorf("serveHTTPGateway: %w", err)
}
}
addr, err := manet.ToNetAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr()))
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: manet.ToNetAddr() failed: %w", err)
return nil, fmt.Errorf("serveHTTPGateway: manet.ToIP() failed: %w", err)
}
if err := node.Repo.SetGatewayAddr(addr); err != nil {
return nil, fmt.Errorf("serveHTTPGateway: SetGatewayAddr() failed: %w", err)
}
}
errc := make(chan error)
var wg sync.WaitGroup
for _, lis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(lis)
}
go func() {
wg.Wait()
close(errc)
@ -1287,7 +1252,7 @@ func merge(cs ...<-chan error) <-chan error {
func YesNoPrompt(prompt string) bool {
var s string
for range 3 {
for i := 0; i < 3; i++ {
fmt.Printf("%s ", prompt)
_, err := fmt.Scanf("%s", &s)
if err != nil {

View File

@ -18,7 +18,7 @@ var (
func makeResolver(t *testing.T, n uint8) *madns.Resolver {
results := make([]net.IPAddr, n)
for i := range n {
for i := uint8(0); i < n; i++ {
results[i] = net.IPAddr{IP: net.ParseIP(fmt.Sprintf("192.0.2.%d", i))}
}

View File

@ -133,7 +133,7 @@ func applyProfiles(conf *config.Config, profiles string) error {
return nil
}
for profile := range strings.SplitSeq(profiles, ",") {
for _, profile := range strings.Split(profiles, ",") {
transformer, ok := config.Profiles[profile]
if !ok {
return fmt.Errorf("invalid configuration profile: %s", profile)

View File

@ -251,7 +251,7 @@ func apiAddrOption(req *cmds.Request) (ma.Multiaddr, error) {
// multipart requests is %-encoded. Before this version, its sent raw.
var encodedAbsolutePathVersion = semver.MustParse("0.23.0-dev")
func makeExecutor(req *cmds.Request, env any) (cmds.Executor, error) {
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
exe := tracingWrappedExecutor{cmds.NewExecutor(req.Root)}
cctx := env.(*oldcmds.Context)

View File

@ -37,7 +37,9 @@ func (ih *IntrHandler) Close() error {
func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ...os.Signal) {
notify := make(chan os.Signal, 1)
signal.Notify(notify, sigs...)
ih.wg.Go(func() {
ih.wg.Add(1)
go func() {
defer ih.wg.Done()
defer signal.Stop(notify)
count := 0
@ -50,7 +52,7 @@ func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ...
handler(count, ih)
}
}
})
}()
}
func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {

View File

@ -1,3 +0,0 @@
ipfswatch
ipfswatch-test-cover
ipfswatch.exe

View File

@ -9,7 +9,6 @@ import (
"os"
"os/signal"
"path/filepath"
"slices"
"syscall"
commands "github.com/ipfs/kubo/commands"
@ -18,11 +17,6 @@ import (
coreapi "github.com/ipfs/kubo/core/coreapi"
corehttp "github.com/ipfs/kubo/core/corehttp"
"github.com/ipfs/kubo/misc/fsutil"
"github.com/ipfs/kubo/plugin"
pluginbadgerds "github.com/ipfs/kubo/plugin/plugins/badgerds"
pluginflatfs "github.com/ipfs/kubo/plugin/plugins/flatfs"
pluginlevelds "github.com/ipfs/kubo/plugin/plugins/levelds"
pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
fsnotify "github.com/fsnotify/fsnotify"
@ -66,18 +60,6 @@ func main() {
}
}
func loadDatastorePlugins(plugins []plugin.Plugin) error {
for _, pl := range plugins {
if pl, ok := pl.(plugin.PluginDatastore); ok {
err := fsrepo.AddDatastoreConfigHandler(pl.DatastoreTypeName(), pl.DatastoreConfigParser())
if err != nil {
return err
}
}
}
return nil
}
func run(ipfsPath, watchPath string) error {
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
@ -95,15 +77,6 @@ func run(ipfsPath, watchPath string) error {
return err
}
if err = loadDatastorePlugins(slices.Concat(
pluginbadgerds.Plugins,
pluginflatfs.Plugins,
pluginlevelds.Plugins,
pluginpebbleds.Plugins,
)); err != nil {
return err
}
r, err := fsrepo.Open(ipfsPath)
if err != nil {
// TODO handle case: daemon running
@ -150,7 +123,6 @@ func run(ipfsPath, watchPath string) error {
log.Printf("received event: %s", e)
isDir, err := IsDirectory(e.Name)
if err != nil {
log.Println(err)
continue
}
switch e.Op {
@ -221,7 +193,7 @@ func addTree(w *fsnotify.Watcher, root string) error {
return filepath.SkipDir
case isDir:
log.Println(path)
if err = w.Add(path); err != nil {
if err := w.Add(path); err != nil {
return err
}
default:
@ -234,10 +206,7 @@ func addTree(w *fsnotify.Watcher, root string) error {
func IsDirectory(path string) (bool, error) {
fileInfo, err := os.Stat(path)
if err != nil {
return false, err
}
return fileInfo.IsDir(), nil
return fileInfo.IsDir(), err
}
func IsHidden(path string) bool {

View File

@ -11,7 +11,7 @@ type ReqLogEntry struct {
EndTime time.Time
Active bool
Command string
Options map[string]any
Options map[string]interface{}
Args []string
ID int

View File

@ -2,7 +2,7 @@ package config
import (
"maps"
"math/rand/v2"
"math/rand"
"strings"
"github.com/ipfs/boxo/autoconf"
@ -70,7 +70,7 @@ func selectRandomResolver(resolvers []string) string {
if len(resolvers) == 0 {
return ""
}
return resolvers[rand.IntN(len(resolvers))]
return resolvers[rand.Intn(len(resolvers))]
}
// DNSResolversWithAutoConf returns DNS resolvers with "auto" values replaced by autoconf values

View File

@ -3,7 +3,6 @@ package config
import (
"fmt"
"path/filepath"
"slices"
"sync"
"github.com/ipfs/boxo/autoconf"
@ -83,9 +82,12 @@ func validateAutoConfDisabled(cfg *Config) error {
var errors []string
// Check Bootstrap
if slices.Contains(cfg.Bootstrap, AutoPlaceholder) {
hasAutoValues = true
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
for _, peer := range cfg.Bootstrap {
if peer == AutoPlaceholder {
hasAutoValues = true
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
break
}
}
// Check DNS.Resolvers
@ -100,15 +102,21 @@ func validateAutoConfDisabled(cfg *Config) error {
}
// Check Routing.DelegatedRouters
if slices.Contains(cfg.Routing.DelegatedRouters, AutoPlaceholder) {
hasAutoValues = true
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
for _, router := range cfg.Routing.DelegatedRouters {
if router == AutoPlaceholder {
hasAutoValues = true
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
break
}
}
// Check Ipns.DelegatedPublishers
if slices.Contains(cfg.Ipns.DelegatedPublishers, AutoPlaceholder) {
hasAutoValues = true
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
for _, publisher := range cfg.Ipns.DelegatedPublishers {
if publisher == AutoPlaceholder {
hasAutoValues = true
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
break
}
}
// Log all errors

View File

@ -84,5 +84,5 @@ type AutoNATThrottleConfig struct {
// global/peer dialback limits.
//
// When unset, this defaults to 1 minute.
Interval OptionalDuration
Interval OptionalDuration `json:",omitempty"`
}

View File

@ -16,13 +16,6 @@ type AutoTLS struct {
// Optional, controls if Kubo should add /tls/sni/.../ws listener to every /tcp port if no explicit /ws is defined in Addresses.Swarm
AutoWSS Flag `json:",omitempty"`
// Optional, controls whether to skip network DNS lookups for p2p-forge domains.
// Applies to resolution via DNS.Resolvers, including /dns* multiaddrs in go-libp2p.
// When enabled (default), A/AAAA queries for *.libp2p.direct are resolved
// locally by parsing the IP directly from the hostname, avoiding network I/O.
// Set to false to always use network DNS (useful for debugging).
SkipDNSLookup Flag `json:",omitempty"`
// Optional override of the parent domain that will be used
DomainSuffix *OptionalString `json:",omitempty"`
@ -49,6 +42,5 @@ const (
DefaultCAEndpoint = p2pforge.DefaultCAEndpoint
DefaultAutoWSS = true // requires AutoTLS.Enabled
DefaultAutoTLSShortAddrs = true // requires AutoTLS.Enabled
DefaultAutoTLSSkipDNSLookup = true // skip network DNS for p2p-forge domains
DefaultAutoTLSRegistrationDelay = 1 * time.Hour
)

View File

@ -47,7 +47,7 @@ type Config struct {
Internal Internal // experimental/unstable options
Bitswap Bitswap
Bitswap Bitswap `json:",omitempty"`
}
const (
@ -106,7 +106,7 @@ func Filename(configroot, userConfigFile string) (string, error) {
}
// HumanOutput gets a config value ready for printing.
func HumanOutput(value any) ([]byte, error) {
func HumanOutput(value interface{}) ([]byte, error) {
s, ok := value.(string)
if ok {
return []byte(strings.Trim(s, "\n")), nil
@ -115,12 +115,12 @@ func HumanOutput(value any) ([]byte, error) {
}
// Marshal configuration with JSON.
func Marshal(value any) ([]byte, error) {
func Marshal(value interface{}) ([]byte, error) {
// need to prettyprint, hence MarshalIndent, instead of Encoder
return json.MarshalIndent(value, "", " ")
}
func FromMap(v map[string]any) (*Config, error) {
func FromMap(v map[string]interface{}) (*Config, error) {
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(v); err != nil {
return nil, err
@ -132,12 +132,12 @@ func FromMap(v map[string]any) (*Config, error) {
return &conf, nil
}
func ToMap(conf *Config) (map[string]any, error) {
func ToMap(conf *Config) (map[string]interface{}, error) {
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(conf); err != nil {
return nil, err
}
var m map[string]any
var m map[string]interface{}
if err := json.NewDecoder(buf).Decode(&m); err != nil {
return nil, fmt.Errorf("failure to decode config: %w", err)
}
@ -147,14 +147,14 @@ func ToMap(conf *Config) (map[string]any, error) {
// Convert config to a map, without using encoding/json, since
// zero/empty/'omitempty' fields are excluded by encoding/json during
// marshaling.
func ReflectToMap(conf any) any {
func ReflectToMap(conf interface{}) interface{} {
v := reflect.ValueOf(conf)
if !v.IsValid() {
return nil
}
// Handle pointer type
if v.Kind() == reflect.Pointer {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
// Create a zero value of the pointer's element type
elemType := v.Type().Elem()
@ -166,7 +166,7 @@ func ReflectToMap(conf any) any {
switch v.Kind() {
case reflect.Struct:
result := make(map[string]any)
result := make(map[string]interface{})
t := v.Type()
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
@ -178,7 +178,7 @@ func ReflectToMap(conf any) any {
return result
case reflect.Map:
result := make(map[string]any)
result := make(map[string]interface{})
iter := v.MapRange()
for iter.Next() {
key := iter.Key()
@ -194,7 +194,7 @@ func ReflectToMap(conf any) any {
return result
case reflect.Slice, reflect.Array:
result := make([]any, v.Len())
result := make([]interface{}, v.Len())
for i := 0; i < v.Len(); i++ {
result[i] = ReflectToMap(v.Index(i).Interface())
}
@ -234,11 +234,11 @@ func CheckKey(key string) error {
// Parse the key and verify it's presence in the map.
var ok bool
var mapCursor map[string]any
var mapCursor map[string]interface{}
parts := strings.Split(key, ".")
for i, part := range parts {
mapCursor, ok = cursor.(map[string]any)
mapCursor, ok = cursor.(map[string]interface{})
if !ok {
if cursor == nil {
return nil

View File

@ -32,7 +32,7 @@ func TestReflectToMap(t *testing.T) {
// Helper function to create a test config with various field types
reflectedConfig := ReflectToMap(new(Config))
mapConfig, ok := reflectedConfig.(map[string]any)
mapConfig, ok := reflectedConfig.(map[string]interface{})
if !ok {
t.Fatal("Config didn't convert to map")
}
@ -42,7 +42,7 @@ func TestReflectToMap(t *testing.T) {
t.Fatal("Identity field not found")
}
mapIdentity, ok := reflectedIdentity.(map[string]any)
mapIdentity, ok := reflectedIdentity.(map[string]interface{})
if !ok {
t.Fatal("Identity field didn't convert to map")
}
@ -70,7 +70,7 @@ func TestReflectToMap(t *testing.T) {
if !ok {
t.Fatal("Bootstrap field not found in config")
}
bootstrap, ok := reflectedBootstrap.([]any)
bootstrap, ok := reflectedBootstrap.([]interface{})
if !ok {
t.Fatal("Bootstrap field didn't convert to []string")
}
@ -82,7 +82,7 @@ func TestReflectToMap(t *testing.T) {
if !ok {
t.Fatal("Datastore field not found in config")
}
datastore, ok := reflectedDatastore.(map[string]any)
datastore, ok := reflectedDatastore.(map[string]interface{})
if !ok {
t.Fatal("Datastore field didn't convert to map")
}
@ -107,7 +107,7 @@ func TestReflectToMap(t *testing.T) {
if !ok {
t.Fatal("DNS field not found in config")
}
DNS, ok := reflectedDNS.(map[string]any)
DNS, ok := reflectedDNS.(map[string]interface{})
if !ok {
t.Fatal("DNS field didn't convert to map")
}
@ -116,12 +116,12 @@ func TestReflectToMap(t *testing.T) {
t.Fatal("Resolvers field not found in DNS")
}
// Test map field
if _, ok := reflectedResolvers.(map[string]any); !ok {
if _, ok := reflectedResolvers.(map[string]interface{}); !ok {
t.Fatal("Resolvers field didn't convert to map")
}
// Test pointer field
if _, ok := DNS["MaxCacheTTL"].(map[string]any); !ok {
if _, ok := DNS["MaxCacheTTL"].(map[string]interface{}); !ok {
// Since OptionalDuration only field is private, we cannot test it
t.Fatal("MaxCacheTTL field didn't convert to map")
}

View File

@ -32,12 +32,12 @@ type Datastore struct {
NoSync bool `json:",omitempty"`
Params *json.RawMessage `json:",omitempty"`
Spec map[string]any
Spec map[string]interface{}
HashOnRead bool
BloomFilterSize int
BlockKeyCacheSize OptionalInteger
WriteThrough Flag `json:",omitempty"`
BlockKeyCacheSize OptionalInteger `json:",omitempty"`
WriteThrough Flag `json:",omitempty"`
}
// DataStorePath returns the default data store path given a configuration root

View File

@ -8,13 +8,11 @@ const (
DefaultInlineDNSLink = false
DefaultDeserializedResponses = true
DefaultDisableHTMLErrors = false
DefaultExposeRoutingAPI = true
DefaultExposeRoutingAPI = false
DefaultDiagnosticServiceURL = "https://check.ipfs.network"
DefaultAllowCodecConversion = false
// Gateway limit defaults from boxo
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
DefaultMaxRequestDuration = gateway.DefaultMaxRequestDuration
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
DefaultMaxRangeRequestFileSize = 0 // 0 means no limit
)
@ -74,12 +72,6 @@ type Gateway struct {
// be overridden per FQDN in PublicGateways.
DeserializedResponses Flag
// AllowCodecConversion enables automatic conversion between codecs when
// the requested format differs from the block's native codec (e.g.,
// converting dag-pb or dag-cbor to dag-json). When disabled, the gateway
// returns 406 Not Acceptable for codec mismatches per IPIP-524.
AllowCodecConversion Flag
// DisableHTMLErrors disables pretty HTML pages when an error occurs. Instead, a `text/plain`
// page will be sent with the raw error message.
DisableHTMLErrors Flag
@ -104,14 +96,6 @@ type Gateway struct {
// A value of 0 disables this timeout.
RetrievalTimeout *OptionalDuration `json:",omitempty"`
// MaxRequestDuration is an absolute deadline for the entire request.
// Unlike RetrievalTimeout (which resets on each data write and catches
// stalled transfers), this is a hard limit on the total time a request
// can take. Returns 504 Gateway Timeout when exceeded.
// This protects the gateway from edge cases and slow client attacks.
// A value of 0 uses the default (1 hour).
MaxRequestDuration *OptionalDuration `json:",omitempty"`
// MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway.
// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
// A value of 0 disables the limit.

View File

@ -2,13 +2,11 @@ package config
import (
"fmt"
"io"
"strconv"
"strings"
chunk "github.com/ipfs/boxo/chunker"
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/verifcid"
mh "github.com/multiformats/go-multihash"
)
@ -31,44 +29,29 @@ const (
// write-batch. The total size of the batch is limited by
// BatchMaxNodes and BatchMaxSize.
DefaultBatchMaxSize = 100 << 20 // 100MiB
// HAMTSizeEstimation values for Import.UnixFSHAMTDirectorySizeEstimation
HAMTSizeEstimationLinks = "links" // legacy: estimate using link names + CID byte lengths (default)
HAMTSizeEstimationBlock = "block" // full serialized dag-pb block size
HAMTSizeEstimationDisabled = "disabled" // disable HAMT sharding entirely
// DAGLayout values for Import.UnixFSDAGLayout
DAGLayoutBalanced = "balanced" // balanced DAG layout (default)
DAGLayoutTrickle = "trickle" // trickle DAG layout
DefaultUnixFSHAMTDirectorySizeEstimation = HAMTSizeEstimationLinks // legacy behavior
DefaultUnixFSDAGLayout = DAGLayoutBalanced // balanced DAG layout
DefaultUnixFSIncludeEmptyDirs = true // include empty directories
)
var (
DefaultUnixFSFileMaxLinks = int64(helpers.DefaultLinksPerBlock)
DefaultUnixFSDirectoryMaxLinks = int64(0)
DefaultUnixFSHAMTDirectoryMaxFanout = int64(uio.DefaultShardWidth)
DefaultUnixFSHAMTDirectoryMaxFanout = int64(io.DefaultShardWidth)
)
// Import configures the default options for ingesting data. This affects commands
// that ingest data, such as 'ipfs add', 'ipfs dag put', 'ipfs block put', 'ipfs files write'.
type Import struct {
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalBytes
UnixFSHAMTDirectorySizeEstimation OptionalString // "links", "block", or "disabled"
UnixFSDAGLayout OptionalString // "balanced" or "trickle"
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalBytes
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
FastProvideRoot Flag
FastProvideWait Flag
}
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
@ -146,30 +129,6 @@ func ValidateImportConfig(cfg *Import) error {
}
}
// Validate UnixFSHAMTDirectorySizeEstimation
if !cfg.UnixFSHAMTDirectorySizeEstimation.IsDefault() {
est := cfg.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation)
switch est {
case HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled:
// valid
default:
return fmt.Errorf("Import.UnixFSHAMTDirectorySizeEstimation must be %q, %q, or %q, got %q",
HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled, est)
}
}
// Validate UnixFSDAGLayout
if !cfg.UnixFSDAGLayout.IsDefault() {
layout := cfg.UnixFSDAGLayout.WithDefault(DefaultUnixFSDAGLayout)
switch layout {
case DAGLayoutBalanced, DAGLayoutTrickle:
// valid
default:
return fmt.Errorf("Import.UnixFSDAGLayout must be %q or %q, got %q",
DAGLayoutBalanced, DAGLayoutTrickle, layout)
}
}
return nil
}
@ -185,7 +144,8 @@ func isValidChunker(chunker string) bool {
}
// Check for size-<bytes> format
if sizeStr, ok := strings.CutPrefix(chunker, "size-"); ok {
if strings.HasPrefix(chunker, "size-") {
sizeStr := strings.TrimPrefix(chunker, "size-")
if sizeStr == "" {
return false
}
@ -207,7 +167,7 @@ func isValidChunker(chunker string) bool {
// Parse and validate min, avg, max values
values := make([]int, 3)
for i := range 3 {
for i := 0; i < 3; i++ {
val, err := strconv.Atoi(parts[i+1])
if err != nil {
return false
@ -222,41 +182,3 @@ func isValidChunker(chunker string) bool {
return false
}
// HAMTSizeEstimationMode returns the boxo SizeEstimationMode based on the config value.
func (i *Import) HAMTSizeEstimationMode() uio.SizeEstimationMode {
switch i.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation) {
case HAMTSizeEstimationLinks:
return uio.SizeEstimationLinks
case HAMTSizeEstimationBlock:
return uio.SizeEstimationBlock
case HAMTSizeEstimationDisabled:
return uio.SizeEstimationDisabled
default:
return uio.SizeEstimationLinks
}
}
// UnixFSSplitterFunc returns a SplitterGen function based on Import.UnixFSChunker.
// The returned function creates a Splitter for the configured chunking strategy.
// The chunker string is parsed once when this method is called, not on each use.
func (i *Import) UnixFSSplitterFunc() chunk.SplitterGen {
chunkerStr := i.UnixFSChunker.WithDefault(DefaultUnixFSChunker)
// Parse size-based chunker (most common case) and return optimized generator
if sizeStr, ok := strings.CutPrefix(chunkerStr, "size-"); ok {
if size, err := strconv.ParseInt(sizeStr, 10, 64); err == nil && size > 0 {
return chunk.SizeSplitterGen(size)
}
}
// For other chunker types (rabin, buzhash) or invalid config,
// fall back to parsing per-use (these are rare cases)
return func(r io.Reader) chunk.Splitter {
s, err := chunk.FromString(r, chunkerStr)
if err != nil {
return chunk.DefaultSplitter(r)
}
return s
}
}
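To make the master-side helper concrete, a minimal in-package sketch of turning the configured chunker string into chunks; data is an illustrative []byte, and the bytes/io imports are assumed:

imp := Import{UnixFSChunker: *NewOptionalString("size-262144")}
gen := imp.UnixFSSplitterFunc() // "size-262144" is parsed once, here
s := gen(bytes.NewReader(data))
for {
	chunk, err := s.NextBytes() // boxo Splitter: next ~256KiB chunk
	if err == io.EOF {
		break
	}
	if err != nil {
		return err
	}
	_ = chunk // hand off to the DAG builder in real code
}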

View File

@ -4,7 +4,6 @@ import (
"strings"
"testing"
"github.com/ipfs/boxo/ipld/unixfs/io"
mh "github.com/multiformats/go-multihash"
)
@ -407,104 +406,3 @@ func TestIsPowerOfTwo(t *testing.T) {
})
}
}
func TestValidateImportConfig_HAMTSizeEstimation(t *testing.T) {
tests := []struct {
name string
value string
wantErr bool
errMsg string
}{
{name: "valid links", value: HAMTSizeEstimationLinks, wantErr: false},
{name: "valid block", value: HAMTSizeEstimationBlock, wantErr: false},
{name: "valid disabled", value: HAMTSizeEstimationDisabled, wantErr: false},
{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
{name: "invalid typo", value: "link", wantErr: true, errMsg: "must be"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSHAMTDirectorySizeEstimation: *NewOptionalString(tt.value),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("expected error for value=%q, got nil", tt.value)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("unexpected error for value=%q: %v", tt.value, err)
}
}
})
}
}
func TestValidateImportConfig_DAGLayout(t *testing.T) {
tests := []struct {
name string
value string
wantErr bool
errMsg string
}{
{name: "valid balanced", value: DAGLayoutBalanced, wantErr: false},
{name: "valid trickle", value: DAGLayoutTrickle, wantErr: false},
{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
{name: "invalid flat", value: "flat", wantErr: true, errMsg: "must be"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := &Import{
UnixFSDAGLayout: *NewOptionalString(tt.value),
}
err := ValidateImportConfig(cfg)
if tt.wantErr {
if err == nil {
t.Errorf("expected error for value=%q, got nil", tt.value)
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
t.Errorf("error = %v, want error containing %q", err, tt.errMsg)
}
} else {
if err != nil {
t.Errorf("unexpected error for value=%q: %v", tt.value, err)
}
}
})
}
}
func TestImport_HAMTSizeEstimationMode(t *testing.T) {
tests := []struct {
cfg string
want io.SizeEstimationMode
}{
{HAMTSizeEstimationLinks, io.SizeEstimationLinks},
{HAMTSizeEstimationBlock, io.SizeEstimationBlock},
{HAMTSizeEstimationDisabled, io.SizeEstimationDisabled},
{"", io.SizeEstimationLinks}, // default (unset returns default)
{"unknown", io.SizeEstimationLinks}, // fallback to default
}
for _, tt := range tests {
t.Run(tt.cfg, func(t *testing.T) {
var imp Import
if tt.cfg != "" {
imp.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(tt.cfg)
}
got := imp.HAMTSizeEstimationMode()
if got != tt.want {
t.Errorf("Import.HAMTSizeEstimationMode() with %q = %v, want %v", tt.cfg, got, tt.want)
}
})
}
}

View File

@ -130,8 +130,8 @@ func DefaultDatastoreConfig() Datastore {
}
}
func pebbleSpec() map[string]any {
return map[string]any{
func pebbleSpec() map[string]interface{} {
return map[string]interface{}{
"type": "pebbleds",
"prefix": "pebble.datastore",
"path": "pebbleds",
@ -139,11 +139,11 @@ func pebbleSpec() map[string]any {
}
}
func pebbleSpecMeasure() map[string]any {
return map[string]any{
func pebbleSpecMeasure() map[string]interface{} {
return map[string]interface{}{
"type": "measure",
"prefix": "pebble.datastore",
"child": map[string]any{
"child": map[string]interface{}{
"formatMajorVersion": int(pebble.FormatNewest),
"type": "pebbleds",
"path": "pebbleds",
@ -151,8 +151,8 @@ func pebbleSpecMeasure() map[string]any {
}
}
func badgerSpec() map[string]any {
return map[string]any{
func badgerSpec() map[string]interface{} {
return map[string]interface{}{
"type": "badgerds",
"prefix": "badger.datastore",
"path": "badgerds",
@ -161,11 +161,11 @@ func badgerSpec() map[string]any {
}
}
func badgerSpecMeasure() map[string]any {
return map[string]any{
func badgerSpecMeasure() map[string]interface{} {
return map[string]interface{}{
"type": "measure",
"prefix": "badger.datastore",
"child": map[string]any{
"child": map[string]interface{}{
"type": "badgerds",
"path": "badgerds",
"syncWrites": false,
@ -174,11 +174,11 @@ func badgerSpecMeasure() map[string]any {
}
}
func flatfsSpec() map[string]any {
return map[string]any{
func flatfsSpec() map[string]interface{} {
return map[string]interface{}{
"type": "mount",
"mounts": []any{
map[string]any{
"mounts": []interface{}{
map[string]interface{}{
"mountpoint": "/blocks",
"type": "flatfs",
"prefix": "flatfs.datastore",
@ -186,7 +186,7 @@ func flatfsSpec() map[string]any {
"sync": false,
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
},
map[string]any{
map[string]interface{}{
"mountpoint": "/",
"type": "levelds",
"prefix": "leveldb.datastore",
@ -197,26 +197,26 @@ func flatfsSpec() map[string]any {
}
}
func flatfsSpecMeasure() map[string]any {
return map[string]any{
func flatfsSpecMeasure() map[string]interface{} {
return map[string]interface{}{
"type": "mount",
"mounts": []any{
map[string]any{
"mounts": []interface{}{
map[string]interface{}{
"mountpoint": "/blocks",
"type": "measure",
"prefix": "flatfs.datastore",
"child": map[string]any{
"child": map[string]interface{}{
"type": "flatfs",
"path": "blocks",
"sync": false,
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
},
},
map[string]any{
map[string]interface{}{
"mountpoint": "/",
"type": "measure",
"prefix": "leveldb.datastore",
"child": map[string]any{
"child": map[string]interface{}{
"type": "levelds",
"path": "datastore",
"compression": "none",

View File

@ -41,7 +41,7 @@ type BitswapBroadcastControl struct {
// MaxPeers sets a hard limit on the number of peers to send broadcasts to.
// A value of 0 means no broadcasts are sent. A value of -1 means there is
// no limit. Default is [DefaultBroadcastControlMaxPeers].
MaxPeers OptionalInteger
MaxPeers OptionalInteger `json:",omitempty"`
// LocalPeers enables or disables broadcast control for peers on the local
// network. If false, then always broadcast to peers on the local network.
// If true, apply broadcast control to local peers. Default is
@ -58,7 +58,7 @@ type BitswapBroadcastControl struct {
// this number of random peers receives a broadcast. This may be helpful in
// cases where peers that are not receiving broadcasts may have wanted
// blocks. Default is [DefaultBroadcastControlMaxRandomPeers].
MaxRandomPeers OptionalInteger
MaxRandomPeers OptionalInteger `json:",omitempty"`
// SendToPendingPeers enables or disables sending broadcasts to any peers
// to which there is a pending message to send. When enabled, this sends
// broadcasts to many more peers, but does so in a way that does not
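A minimal sketch using only the struct shown above; the values are illustrative, not recommendations:

bc := BitswapBroadcastControl{
	MaxPeers:       *NewOptionalInteger(100), // hard cap: broadcast to at most 100 peers
	MaxRandomPeers: *NewOptionalInteger(0),   // no additional random recipients
}
_ = bc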

View File

@ -7,5 +7,5 @@ type Plugins struct {
type Plugin struct {
Disabled bool
Config any `json:",omitempty"`
Config interface{} `json:",omitempty"`
}

View File

@ -210,9 +210,7 @@ NOTE: This profile may only be applied when first initializing node at IPFS_PATH
},
},
"badgerds": {
Description: `DEPRECATED: Configures the node to use the legacy badgerv1 datastore.
This profile will be removed in a future Kubo release.
New deployments should use 'flatfs' or 'pebbleds' instead.
Description: `Configures the node to use the legacy badgerv1 datastore.
NOTE: this is badger 1.x, which has known bugs and is no longer supported by the upstream team.
It is provided here only for pre-existing users, allowing them to migrate to a more modern datastore.
@ -227,14 +225,6 @@ Other caveats:
* Good for medium-size datastores, but may run into performance issues
if your dataset is bigger than a terabyte.
To migrate: create a new IPFS_PATH with 'ipfs init --profile=flatfs',
move pinned data via 'ipfs dag export/import' or 'ipfs pin ls -t recursive|add',
and decommission the old badger-based node.
When it comes to block storage, use experimental 'pebbleds' only if you are sure
modern 'flatfs' does not serve your use case (most users will be perfectly fine
with flatfs, it is also possible to keep flatfs for blocks and replace leveldb
with pebble if preferred over leveldb).
See configuration documentation at:
https://github.com/ipfs/kubo/blob/master/docs/datastores.md#badgerds
@ -249,9 +239,8 @@ NOTE: This profile may only be applied when first initializing node at IPFS_PATH
},
},
"badgerds-measure": {
Description: `DEPRECATED: Configures the node to use the legacy badgerv1 datastore with metrics wrapper.
This profile will be removed in a future Kubo release.
New deployments should use 'flatfs' or 'pebbleds' instead.
Description: `Configures the node to use the legacy badgerv1 datastore with metrics wrapper.
Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
via 'ipfs init --profile badgerds-measure'
@ -323,33 +312,45 @@ fetching may be degraded.
return nil
},
},
"unixfs-v0-2015": {
Description: `Legacy UnixFS import profile for backward-compatible CID generation.
Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and
link-based HAMT size estimation. Use only when legacy CIDs are required.
See https://github.com/ipfs/specs/pull/499. Alias: legacy-cid-v0`,
Transform: applyUnixFSv02015,
},
"legacy-cid-v0": {
Description: `Alias for unixfs-v0-2015 profile.`,
Transform: applyUnixFSv02015,
},
"unixfs-v1-2025": {
Description: `Recommended UnixFS import profile for cross-implementation CID determinism.
Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node,
256 HAMT fanout, and block-based size estimation for HAMT threshold.
See https://github.com/ipfs/specs/pull/499`,
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. This is likely the least optimal preset; use only if legacy behavior is required.`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1 MiB
c.Import.CidVersion = *NewOptionalInteger(0)
c.Import.UnixFSRawLeaves = False
c.Import.UnixFSChunker = *NewOptionalString("size-262144")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationBlock)
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
return nil
},
},
"test-cid-v1": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
return nil
},
},
"test-cid-v1-wide": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per every node type, switch dir to HAMT above 1MiB).`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("1MiB") // 1MiB
return nil
},
},
@ -434,18 +435,3 @@ func mapKeys(m map[string]struct{}) []string {
}
return out
}
// applyUnixFSv02015 applies the legacy UnixFS v0 (2015) import settings.
func applyUnixFSv02015(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(0)
c.Import.UnixFSRawLeaves = False
c.Import.UnixFSChunker = *NewOptionalString("size-262144") // 256 KiB
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationLinks)
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
return nil
}
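For illustration, a profile's Transform can be applied to an in-memory config like so; this assumes the exported Profiles map keeps the shape shown here, and uses the master-side profile name:

cfg := &Config{}
if p, ok := Profiles["unixfs-v1-2025"]; ok {
	if err := p.Transform(cfg); err != nil {
		return err
	}
}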

View File

@ -102,7 +102,7 @@ type ProvideDHT struct {
func ParseProvideStrategy(s string) ProvideStrategy {
var strategy ProvideStrategy
for part := range strings.SplitSeq(s, "+") {
for _, part := range strings.Split(s, "+") {
switch part {
case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
return ProvideStrategyAll
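Either side parses "+"-joined strategy lists the same way; a hedged sketch (the "pinned+mfs" combination is assumed to name two valid strategies):

s := ParseProvideStrategy("pinned+mfs") // union of both strategies
all := ParseProvideStrategy("flat")     // deprecated alias, same as "all"
_, _ = s, all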

View File

@ -5,7 +5,6 @@ import (
"fmt"
"os"
"runtime"
"slices"
"strings"
)
@ -60,7 +59,7 @@ type Router struct {
// Parameters are extra configuration that this router might need.
// A common one for HTTP router is "Endpoint".
Parameters any
Parameters interface{}
}
type (
@ -79,7 +78,13 @@ func (m Methods) Check() error {
// Check unsupported methods
for k := range m {
seen := slices.Contains(MethodNameList, k)
seen := false
for _, mn := range MethodNameList {
if mn == k {
seen = true
break
}
}
if seen {
continue
@ -103,7 +108,7 @@ func (r *RouterParser) UnmarshalJSON(b []byte) error {
}
raw := out.Parameters.(*json.RawMessage)
var p any
var p interface{}
switch out.Type {
case RouterTypeHTTP:
p = &HTTPRouterParams{}

View File

@ -18,7 +18,7 @@ import (
var ErrNotInitialized = errors.New("ipfs not initialized, please run 'ipfs init'")
// ReadConfigFile reads the config from `filename` into `cfg`.
func ReadConfigFile(filename string, cfg any) error {
func ReadConfigFile(filename string, cfg interface{}) error {
f, err := os.Open(filename)
if err != nil {
if os.IsNotExist(err) {
@ -34,7 +34,7 @@ func ReadConfigFile(filename string, cfg any) error {
}
// WriteConfigFile writes the config from `cfg` into `filename`.
func WriteConfigFile(filename string, cfg any) error {
func WriteConfigFile(filename string, cfg interface{}) error {
err := os.MkdirAll(filepath.Dir(filename), 0o755)
if err != nil {
return err
@ -50,7 +50,7 @@ func WriteConfigFile(filename string, cfg any) error {
}
// encode configuration with JSON.
func encode(w io.Writer, value any) error {
func encode(w io.Writer, value interface{}) error {
// need to prettyprint, hence MarshalIndent, instead of Encoder
buf, err := config.Marshal(value)
if err != nil {

View File

@ -298,7 +298,7 @@ func (d Duration) MarshalJSON() ([]byte, error) {
}
func (d *Duration) UnmarshalJSON(b []byte) error {
var v any
var v interface{}
if err := json.Unmarshal(b, &v); err != nil {
return err
}
@ -485,7 +485,7 @@ func (p *OptionalBytes) UnmarshalJSON(input []byte) error {
case "null", "undefined":
*p = OptionalBytes{}
default:
var value any
var value interface{}
err := json.Unmarshal(input, &value)
if err != nil {
return err

View File

@ -15,7 +15,6 @@ import (
"github.com/cheggaaa/pb"
"github.com/ipfs/boxo/files"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
"github.com/ipfs/boxo/verifcid"
@ -69,7 +68,6 @@ const (
mtimeNsecsOptionName = "mtime-nsecs"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
emptyDirsOptionName = "empty-dirs"
)
const (
@ -149,18 +147,6 @@ to find it in the future:
See 'ipfs files --help' to learn more about using MFS
for keeping track of added files and directories.
SYMLINK HANDLING:
By default, symbolic links are preserved as UnixFS symlink nodes that store
the target path. Use --dereference-symlinks to resolve symlinks to their
target content instead:
> ipfs add -r --dereference-symlinks ./mydir
This resolves all symlinks, including CLI arguments and those found inside
directories. Symlinks to files become regular file content; symlinks to
directories are traversed and their contents are added.
CHUNKING EXAMPLES:
The chunker option, '-s', specifies the chunking strategy that dictates
@ -172,16 +158,6 @@ Buzhash or Rabin fingerprint chunker for content defined chunking by
specifying buzhash or rabin-[min]-[avg]-[max] (where min/avg/max refer
to the desired chunk sizes in bytes), e.g. 'rabin-262144-524288-1048576'.
The maximum accepted value for 'size-N' and rabin 'max' parameter is
2MiB minus 256 bytes (2096896 bytes). The 256-byte overhead budget is
reserved for protobuf/UnixFS framing so that serialized blocks stay
within the 2MiB block size limit from the bitswap spec. The buzhash
chunker uses a fixed internal maximum of 512KiB and is not affected.
Only the fixed-size chunker ('size-N') guarantees that the same data
will always produce the same CID. The rabin and buzhash chunkers may
change their internal parameters in a future release.
The following examples use very small byte sizes to demonstrate the
properties of the different chunkers on a small file. You'll likely
want to use chunk sizes about 1024 times larger for most files.
@ -224,13 +200,11 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
Options: []cmds.Option{
// Input Processing
cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
cmds.OptionDerefArgs, // DEPRECATED: use --dereference-symlinks instead
cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args)
cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file
cmds.OptionHidden,
cmds.OptionIgnore,
cmds.OptionIgnoreRules,
cmds.BoolOption(emptyDirsOptionName, "E", "Include empty directories in the import.").WithDefault(config.DefaultUnixFSIncludeEmptyDirs),
cmds.OptionDerefSymlinks, // resolve symlinks to their target content
// Output Control
cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),
@ -300,7 +274,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
}
progress, _ := req.Options[progressOptionName].(bool)
trickle, trickleSet := req.Options[trickleOptionName].(bool)
trickle, _ := req.Options[trickleOptionName].(bool)
wrap, _ := req.Options[wrapOptionName].(bool)
onlyHash, _ := req.Options[onlyHashOptionName].(bool)
silent, _ := req.Options[silentOptionName].(bool)
@ -311,7 +285,6 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int)
var sizeEstimationMode uio.SizeEstimationMode
nocopy, _ := req.Options[noCopyOptionName].(bool)
fscache, _ := req.Options[fstoreCacheOptionName].(bool)
cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)
@ -339,17 +312,6 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint)
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
emptyDirs, _ := req.Options[emptyDirsOptionName].(bool)
// Note: --dereference-args is deprecated but still works for backwards compatibility.
// The help text marks it as DEPRECATED. Users should use --dereference-symlinks instead,
// which is a superset (resolves both CLI arg symlinks AND nested symlinks in directories).
// Wire --trickle from config
if !trickleSet && !cfg.Import.UnixFSDAGLayout.IsDefault() {
layout := cfg.Import.UnixFSDAGLayout.WithDefault(config.DefaultUnixFSDAGLayout)
trickle = layout == config.DAGLayoutTrickle
}
if chunker == "" {
chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)
@ -386,9 +348,6 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
}
// SizeEstimationMode is always set from config (no CLI flag)
sizeEstimationMode = cfg.Import.HAMTSizeEstimationMode()
fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)
@ -450,8 +409,6 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
options.Unixfs.PreserveMode(preserveMode),
options.Unixfs.PreserveMtime(preserveMtime),
options.Unixfs.IncludeEmptyDirs(emptyDirs),
}
if mode != 0 {
@ -484,9 +441,6 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout))
}
// SizeEstimationMode is always set from config
opts = append(opts, options.Unixfs.SizeEstimationMode(sizeEstimationMode))
if trickle {
opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
}

View File

@ -98,9 +98,6 @@ var blockGetCmd = &cmds.Command{
'ipfs block get' is a plumbing command for retrieving raw IPFS blocks.
It takes a <cid>, and outputs the block to stdout.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipld.raw",
},
},
Arguments: []cmds.Argument{
@ -122,8 +119,6 @@ It takes a <cid>, and outputs the block to stdout.
return err
}
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipld.raw")
return res.Emit(r)
},
}

View File

@ -112,7 +112,7 @@ The optional format string is a printf style format string:
return emitCids(req, resp, opts)
},
PostRun: cmds.PostRunMap{
cmds.CLI: streamResult(func(v any, out io.Writer) nonFatalError {
cmds.CLI: streamResult(func(v interface{}, out io.Writer) nonFatalError {
r := v.(*CidFormatRes)
if r.ErrorMsg != "" {
return nonFatalError(fmt.Sprintf("%s: %s", r.CidStr, r.ErrorMsg))

View File

@ -39,7 +39,7 @@ func TestCidFmtCmd(t *testing.T) {
// Mock request
req := &cmds.Request{
Options: map[string]any{
Options: map[string]interface{}{
cidToVersionOptionName: "0",
cidMultibaseOptionName: e.MultibaseName,
cidFormatOptionName: "%s",
@ -90,7 +90,7 @@ func TestCidFmtCmd(t *testing.T) {
for _, e := range testCases {
// Mock request
req := &cmds.Request{
Options: map[string]any{
Options: map[string]interface{}{
cidToVersionOptionName: e.Ver,
cidMultibaseOptionName: e.MultibaseName,
cidFormatOptionName: "%s",

View File

@ -21,7 +21,7 @@ import (
var log = logging.Logger("core/commands/cmdenv")
// GetNode extracts the node from the environment.
func GetNode(env any) (*core.IpfsNode, error) {
func GetNode(env interface{}) (*core.IpfsNode, error) {
ctx, ok := env.(*commands.Context)
if !ok {
return nil, fmt.Errorf("expected env to be of type %T, got %T", ctx, env)

View File

@ -2,28 +2,24 @@ package cmdutils
import (
"fmt"
"slices"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/libp2p/go-libp2p/core/peer"
)
const (
AllowBigBlockOptionName = "allow-big-block"
// SoftBlockLimit is the maximum block size for bitswap transfer.
// If this value changes, update the "2MiB" strings in error messages below.
SoftBlockLimit = 2 * 1024 * 1024 // https://specs.ipfs.tech/bitswap-protocol/#block-sizes
MaxPinNameBytes = 255 // Maximum number of bytes allowed for a pin name
SoftBlockLimit = 1024 * 1024 // https://github.com/ipfs/kubo/issues/7421#issuecomment-910833499
MaxPinNameBytes = 255 // Maximum number of bytes allowed for a pin name
)
var AllowBigBlockOption cmds.Option
func init() {
AllowBigBlockOption = cmds.BoolOption(AllowBigBlockOptionName, "Disable block size check and allow creation of blocks bigger than 2MiB. WARNING: such blocks won't be transferable over the standard bitswap.").WithDefault(false)
AllowBigBlockOption = cmds.BoolOption(AllowBigBlockOptionName, "Disable block size check and allow creation of blocks bigger than 1MiB. WARNING: such blocks won't be transferable over the standard bitswap.").WithDefault(false)
}
func CheckCIDSize(req *cmds.Request, c cid.Cid, dagAPI coreiface.APIDagService) error {
@ -46,10 +42,11 @@ func CheckBlockSize(req *cmds.Request, size uint64) error {
return nil
}
// Block size is limited to SoftBlockLimit (2MiB) as defined in the bitswap spec.
// https://specs.ipfs.tech/bitswap-protocol/#block-sizes
// We do not allow producing blocks bigger than 1 MiB to avoid errors
// when transmitting them over BitSwap. The 1 MiB constant is an
// unenforced and undeclared rule of thumb hard-coded here.
if size > SoftBlockLimit {
return fmt.Errorf("produced block is over 2MiB: big blocks can't be exchanged with other peers. consider using UnixFS for automatic chunking of bigger files, or pass --allow-big-block to override")
return fmt.Errorf("produced block is over 1MiB: big blocks can't be exchanged with other peers. consider using UnixFS for automatic chunking of bigger files, or pass --allow-big-block to override")
}
return nil
}
@ -87,13 +84,3 @@ func PathOrCidPath(str string) (path.Path, error) {
// Send back original err.
return nil, originalErr
}
// CloneAddrInfo returns a copy of the AddrInfo with a cloned Addrs slice.
// This prevents data races if the sender reuses the backing array.
// See: https://github.com/ipfs/kubo/issues/11116
func CloneAddrInfo(ai peer.AddrInfo) peer.AddrInfo {
return peer.AddrInfo{
ID: ai.ID,
Addrs: slices.Clone(ai.Addrs),
}
}
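Illustrative use of the master-side helper above; consume is a hypothetical downstream goroutine:

safe := CloneAddrInfo(ai) // ai's sender may keep mutating ai.Addrs
go consume(safe)          // safe.Addrs is an independent copy, so no race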

View File

@ -20,7 +20,7 @@ type commandEncoder struct {
w io.Writer
}
func (e *commandEncoder) Encode(v any) error {
func (e *commandEncoder) Encode(v interface{}) error {
var (
cmd *Command
ok bool
@ -232,7 +232,7 @@ type nonFatalError string
// streamResult is a helper function to stream results that possibly
// contain non-fatal errors. The helper function is allowed to panic
// on internal errors.
func streamResult(procVal func(any, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
return func(res cmds.Response, re cmds.ResponseEmitter) (rerr error) {
defer func() {
if r := recover(); r != nil {

View File

@ -76,9 +76,6 @@ func TestCommands(t *testing.T) {
"/diag/cmds",
"/diag/cmds/clear",
"/diag/cmds/set-time",
"/diag/datastore",
"/diag/datastore/count",
"/diag/datastore/get",
"/diag/profile",
"/diag/sys",
"/files",
@ -93,7 +90,6 @@ func TestCommands(t *testing.T) {
"/files/stat",
"/files/write",
"/files/chmod",
"/files/chroot",
"/files/touch",
"/filestore",
"/filestore/dups",
@ -106,7 +102,6 @@ func TestCommands(t *testing.T) {
"/key/gen",
"/key/import",
"/key/list",
"/key/ls",
"/key/rename",
"/key/rm",
"/key/rotate",
@ -124,14 +119,12 @@ func TestCommands(t *testing.T) {
"/multibase/transcode",
"/multibase/list",
"/name",
"/name/get",
"/name/inspect",
"/name/publish",
"/name/pubsub",
"/name/pubsub/cancel",
"/name/pubsub/state",
"/name/pubsub/subs",
"/name/put",
"/name/resolve",
"/object",
"/object/data",
@ -176,7 +169,6 @@ func TestCommands(t *testing.T) {
"/pubsub/ls",
"/pubsub/peers",
"/pubsub/pub",
"/pubsub/reset",
"/pubsub/sub",
"/refs",
"/refs/local",
@ -198,7 +190,6 @@ func TestCommands(t *testing.T) {
"/stats/repo",
"/swarm",
"/swarm/addrs",
"/swarm/addrs/autonat",
"/swarm/addrs/listen",
"/swarm/addrs/local",
"/swarm/connect",

View File

@ -22,13 +22,13 @@ import (
// ConfigUpdateOutput is config profile apply command's output
type ConfigUpdateOutput struct {
OldCfg map[string]any
NewCfg map[string]any
OldCfg map[string]interface{}
NewCfg map[string]interface{}
}
type ConfigField struct {
Key string
Value any
Value interface{}
}
const (
@ -117,7 +117,7 @@ Set multiple values in the 'Addresses.AppendAnnounce' array:
value := args[1]
if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON {
var jsonVal any
var jsonVal interface{}
if err := json.Unmarshal([]byte(value), &jsonVal); err != nil {
err = fmt.Errorf("failed to unmarshal json. %s", err)
return err
@ -199,7 +199,7 @@ var configShowCmd = &cmds.Command{
NOTE: For security reasons, this command will omit your private key and remote services. If you would like to make a full backup of your config (private key included), you must copy the config file from your repo.
`,
},
Type: make(map[string]any),
Type: make(map[string]interface{}),
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {
@ -217,7 +217,7 @@ NOTE: For security reasons, this command will omit your private key and remote s
return err
}
var cfg map[string]any
var cfg map[string]interface{}
err = json.Unmarshal(data, &cfg)
if err != nil {
return err
@ -262,7 +262,7 @@ NOTE: For security reasons, this command will omit your private key and remote s
},
}
var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]any) error {
var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]interface{}) error {
buf, err := config.HumanOutput(out)
if err != nil {
return err
@ -273,35 +273,35 @@ var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer
})
// Scrubs value and returns error if missing
func scrubValue(m map[string]any, key []string) (map[string]any, error) {
func scrubValue(m map[string]interface{}, key []string) (map[string]interface{}, error) {
return scrubMapInternal(m, key, false)
}
// Scrubs value and returns no error if missing
func scrubOptionalValue(m map[string]any, key []string) (map[string]any, error) {
func scrubOptionalValue(m map[string]interface{}, key []string) (map[string]interface{}, error) {
return scrubMapInternal(m, key, true)
}
func scrubEither(u any, key []string, okIfMissing bool) (any, error) {
m, ok := u.(map[string]any)
func scrubEither(u interface{}, key []string, okIfMissing bool) (interface{}, error) {
m, ok := u.(map[string]interface{})
if ok {
return scrubMapInternal(m, key, okIfMissing)
}
return scrubValueInternal(m, key, okIfMissing)
}
func scrubValueInternal(v any, key []string, okIfMissing bool) (any, error) {
func scrubValueInternal(v interface{}, key []string, okIfMissing bool) (interface{}, error) {
if v == nil && !okIfMissing {
return nil, errors.New("failed to find specified key")
}
return nil, nil
}
func scrubMapInternal(m map[string]any, key []string, okIfMissing bool) (map[string]any, error) {
func scrubMapInternal(m map[string]interface{}, key []string, okIfMissing bool) (map[string]interface{}, error) {
if len(key) == 0 {
return make(map[string]any), nil // delete value
return make(map[string]interface{}), nil // delete value
}
n := map[string]any{}
n := map[string]interface{}{}
for k, v := range m {
if key[0] == "*" || strings.EqualFold(key[0], k) {
u, err := scrubEither(v, key[1:], okIfMissing)
@ -463,7 +463,7 @@ func buildProfileHelp() string {
}
// scrubPrivKey scrubs private key for security reasons.
func scrubPrivKey(cfg *config.Config) (map[string]any, error) {
func scrubPrivKey(cfg *config.Config) (map[string]interface{}, error) {
cfgMap, err := config.ToMap(cfg)
if err != nil {
return nil, err
@ -553,7 +553,7 @@ func getConfigWithAutoExpand(r repo.Repo, key string) (*ConfigField, error) {
}, nil
}
func setConfig(r repo.Repo, key string, value any) (*ConfigField, error) {
func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
err := r.SetConfigKey(key, value)
if err != nil {
return nil, fmt.Errorf("failed to set config value: %s (maybe use --json?)", err)
@ -646,7 +646,7 @@ func getRemotePinningServices(r repo.Repo) (map[string]config.RemotePinningServi
if remoteServicesTag, err := getConfig(r, config.RemoteServicesPath); err == nil {
// seems that Go cannot type-assert map[string]interface{} to map[string]config.RemotePinningService
// so we have to manually copy the data :-|
if val, ok := remoteServicesTag.Value.(map[string]any); ok {
if val, ok := remoteServicesTag.Value.(map[string]interface{}); ok {
jsonString, err := json.Marshal(val)
if err != nil {
return nil, err
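A hedged in-package sketch of what the scrub helpers do, roughly mirroring scrubPrivKey above: key segments are matched case-insensitively, "*" is a wildcard, and a matched leaf is dropped:

cfgMap, err := config.ToMap(cfg)
if err != nil {
	return nil, err
}
scrubbed, err := scrubValue(cfgMap, []string{"Identity", "PrivKey"}) // removes the private key subtree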

View File

@ -7,7 +7,6 @@ import (
"io"
"path"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
@ -276,9 +275,6 @@ Note that at present only single root selections / .car files are supported.
The output of blocks happens in strict DAG-traversal, first-seen, order.
CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipld.car",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("root", true, false, "CID of a root to recursively export").EnableStdin(),
@ -294,9 +290,9 @@ CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/
// DagStat is a dag stat command response
type DagStat struct {
Cid cid.Cid
Size uint64 `json:",omitempty"`
NumBlocks int64 `json:",omitempty"`
Cid cid.Cid `json:",omitempty"`
Size uint64 `json:",omitempty"`
NumBlocks int64 `json:",omitempty"`
}
func (s *DagStat) String() string {
@ -353,11 +349,7 @@ type DagStatSummary struct {
}
func (s *DagStatSummary) String() string {
return fmt.Sprintf("Total Size: %d (%s)\nUnique Blocks: %d\nShared Size: %d (%s)\nRatio: %f",
s.TotalSize, humanize.Bytes(s.TotalSize),
s.UniqueBlocks,
s.SharedSize, humanize.Bytes(s.SharedSize),
s.Ratio)
return fmt.Sprintf("Total Size: %d\nUnique Blocks: %d\nShared Size: %d\nRatio: %f", s.TotalSize, s.UniqueBlocks, s.SharedSize, s.Ratio)
}
func (s *DagStatSummary) incrementTotalSize(size uint64) {
@ -392,7 +384,7 @@ Note: This command skips duplicate blocks in reporting both size and the number
cmds.StringArg("root", true, true, "CID of a DAG root to get statistics for").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(progressOptionName, "p", "Show progress on stderr. Auto-detected if stderr is a terminal."),
cmds.BoolOption(progressOptionName, "p", "Return progressive data while reading through the DAG").WithDefault(true),
},
Run: dagStat,
Type: DagStatSummary{},

View File

@ -73,8 +73,6 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
}()
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipld.car")
if err := res.Emit(pipeR); err != nil {
pipeR.Close() // ignore the error if any
return err

View File

@ -5,7 +5,6 @@ import (
"io"
"os"
"github.com/dustin/go-humanize"
mdag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/merkledag/traverse"
cid "github.com/ipfs/go-cid"
@ -20,11 +19,7 @@ import (
// to compute the new state
func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
// Default to true (emit intermediate states) for HTTP/RPC clients that want progress
progressive := true
if val, specified := req.Options[progressOptionName].(bool); specified {
progressive = val
}
progressive := req.Options[progressOptionName].(bool)
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
@ -89,18 +84,6 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment)
}
func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
// Determine whether to show progress based on TTY detection or explicit flag
var showProgress bool
val, specified := res.Request().Options[progressOptionName]
if !specified {
// Auto-detect: show progress only if stderr is a TTY
if errStat, err := os.Stderr.Stat(); err == nil {
showProgress = (errStat.Mode() & os.ModeCharDevice) != 0
}
} else {
showProgress = val.(bool)
}
var dagStats *DagStatSummary
for {
v, err := res.Next()
@ -113,26 +96,17 @@ func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
switch out := v.(type) {
case *DagStatSummary:
dagStats = out
// Ratio == 0 means this is a progress update (not final result)
if showProgress && dagStats.Ratio == 0 {
// Sum up total progress across all DAGs being scanned
var totalBlocks int64
var totalSize uint64
for _, stat := range dagStats.DagStatsArray {
totalBlocks += stat.NumBlocks
totalSize += stat.Size
if dagStats.Ratio == 0 {
length := len(dagStats.DagStatsArray)
if length > 0 {
currentStat := dagStats.DagStatsArray[length-1]
fmt.Fprintf(os.Stderr, "CID: %s, Size: %d, NumBlocks: %d\n", currentStat.Cid, currentStat.Size, currentStat.NumBlocks)
}
fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks, %d bytes (%s)\r", totalBlocks, totalSize, humanize.Bytes(totalSize))
}
default:
return e.TypeErr(out, v)
}
}
// Clear the progress line before final output
if showProgress {
fmt.Fprint(os.Stderr, "\033[2K\r")
}
return re.Emit(dagStats)
}
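The master-side auto-detection boils down to this standalone check: stderr counts as interactive only when it is a character device:

func stderrIsTTY() bool {
	st, err := os.Stderr.Stat()
	return err == nil && st.Mode()&os.ModeCharDevice != 0
}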

View File

@ -1,16 +1,7 @@
package commands
import (
"encoding/hex"
"errors"
"fmt"
"io"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cmds "github.com/ipfs/go-ipfs-cmds"
oldcmds "github.com/ipfs/kubo/commands"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
)
var DiagCmd = &cmds.Command{
@ -19,182 +10,8 @@ var DiagCmd = &cmds.Command{
},
Subcommands: map[string]*cmds.Command{
"sys": sysDiagCmd,
"cmds": ActiveReqsCmd,
"profile": sysProfileCmd,
"datastore": diagDatastoreCmd,
},
}
var diagDatastoreCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Low-level datastore inspection for debugging and testing.",
ShortDescription: `
'ipfs diag datastore' provides low-level access to the datastore for debugging
and testing purposes.
WARNING: FOR DEBUGGING/TESTING ONLY
These commands expose internal datastore details and should not be used
in production workflows. The datastore format may change between versions.
The daemon must not be running when calling these commands.
EXAMPLE
Inspecting pubsub seqno validator state:
$ ipfs diag datastore count /pubsub/seqno/
2
$ ipfs diag datastore get --hex /pubsub/seqno/12D3KooW...
Key: /pubsub/seqno/12D3KooW...
Hex Dump:
00000000 18 81 81 c8 91 c0 ea f6 |........|
`,
},
Subcommands: map[string]*cmds.Command{
"get": diagDatastoreGetCmd,
"count": diagDatastoreCountCmd,
},
}
const diagDatastoreHexOptionName = "hex"
type diagDatastoreGetResult struct {
Key string `json:"key"`
Value []byte `json:"value"`
HexDump string `json:"hex_dump,omitempty"`
}
var diagDatastoreGetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Read a raw key from the datastore.",
ShortDescription: `
Returns the value stored at the given datastore key.
Default output is raw bytes. Use --hex for human-readable hex dump.
The daemon must not be running when using this command.
WARNING: FOR DEBUGGING/TESTING ONLY
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, false, "Datastore key to read (e.g., /pubsub/seqno/<peerid>)"),
},
Options: []cmds.Option{
cmds.BoolOption(diagDatastoreHexOptionName, "Output hex dump instead of raw bytes"),
},
NoRemote: true,
PreRun: DaemonNotRunning,
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cctx := env.(*oldcmds.Context)
repo, err := fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return fmt.Errorf("failed to open repo: %w", err)
}
defer repo.Close()
keyStr := req.Arguments[0]
key := datastore.NewKey(keyStr)
ds := repo.Datastore()
val, err := ds.Get(req.Context, key)
if err != nil {
if errors.Is(err, datastore.ErrNotFound) {
return fmt.Errorf("key not found: %s", keyStr)
}
return fmt.Errorf("failed to read key: %w", err)
}
result := &diagDatastoreGetResult{
Key: keyStr,
Value: val,
}
if hexDump, _ := req.Options[diagDatastoreHexOptionName].(bool); hexDump {
result.HexDump = hex.Dump(val)
}
return cmds.EmitOnce(res, result)
},
Type: diagDatastoreGetResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *diagDatastoreGetResult) error {
if result.HexDump != "" {
fmt.Fprintf(w, "Key: %s\nHex Dump:\n%s", result.Key, result.HexDump)
return nil
}
// Raw bytes output
_, err := w.Write(result.Value)
return err
}),
},
}
type diagDatastoreCountResult struct {
Prefix string `json:"prefix"`
Count int64 `json:"count"`
}
var diagDatastoreCountCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Count entries matching a datastore prefix.",
ShortDescription: `
Counts the number of datastore entries whose keys start with the given prefix.
The daemon must not be running when using this command.
WARNING: FOR DEBUGGING/TESTING ONLY
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("prefix", true, false, "Datastore key prefix (e.g., /pubsub/seqno/)"),
},
NoRemote: true,
PreRun: DaemonNotRunning,
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cctx := env.(*oldcmds.Context)
repo, err := fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return fmt.Errorf("failed to open repo: %w", err)
}
defer repo.Close()
prefix := req.Arguments[0]
ds := repo.Datastore()
q := query.Query{
Prefix: prefix,
KeysOnly: true,
}
results, err := ds.Query(req.Context, q)
if err != nil {
return fmt.Errorf("failed to query datastore: %w", err)
}
defer results.Close()
var count int64
for result := range results.Next() {
if result.Error != nil {
return fmt.Errorf("query error: %w", result.Error)
}
count++
}
return cmds.EmitOnce(res, &diagDatastoreCountResult{
Prefix: prefix,
Count: count,
})
},
Type: diagDatastoreCountResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *diagDatastoreCountResult) error {
_, err := fmt.Fprintf(w, "%d\n", result.Count)
return err
}),
"sys": sysDiagCmd,
"cmds": ActiveReqsCmd,
"profile": sysProfileCmd,
},
}

View File

@ -6,7 +6,7 @@ import (
)
// TypeErr returns an error with a string that explains what error was expected and what was received.
func TypeErr(expected, actual any) error {
func TypeErr(expected, actual interface{}) error {
return fmt.Errorf("expected type %T, got %T", expected, actual)
}

View File

@ -1,8 +1,6 @@
package commands
import (
cmds "github.com/ipfs/go-ipfs-cmds"
)
import cmds "github.com/ipfs/go-ipfs-cmds"
func CreateCmdExtras(opts ...func(e *cmds.Extra)) *cmds.Extra {
e := new(cmds.Extra)
@ -56,8 +54,8 @@ func GetPreemptsAutoUpdate(e *cmds.Extra) (val bool, found bool) {
return getBoolFlag(e, preemptsAutoUpdate{})
}
func getBoolFlag(e *cmds.Extra, key any) (val bool, found bool) {
var ival any
func getBoolFlag(e *cmds.Extra, key interface{}) (val bool, found bool) {
var ival interface{}
ival, found = e.GetValue(key)
if !found {
return false, false

View File

@ -16,24 +16,18 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
oldcmds "github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/node"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
bservice "github.com/ipfs/boxo/blockservice"
bstore "github.com/ipfs/boxo/blockstore"
offline "github.com/ipfs/boxo/exchange/offline"
dag "github.com/ipfs/boxo/ipld/merkledag"
ft "github.com/ipfs/boxo/ipld/unixfs"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
cid "github.com/ipfs/go-cid"
cidenc "github.com/ipfs/go-cidutil/cidenc"
"github.com/ipfs/go-datastore"
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
@ -126,19 +120,18 @@ performance.`,
cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true),
},
Subcommands: map[string]*cmds.Command{
"read": filesReadCmd,
"write": filesWriteCmd,
"mv": filesMvCmd,
"cp": filesCpCmd,
"ls": filesLsCmd,
"mkdir": filesMkdirCmd,
"stat": filesStatCmd,
"rm": filesRmCmd,
"flush": filesFlushCmd,
"chcid": filesChcidCmd,
"chmod": filesChmodCmd,
"chroot": filesChrootCmd,
"touch": filesTouchCmd,
"read": filesReadCmd,
"write": filesWriteCmd,
"mv": filesMvCmd,
"cp": filesCpCmd,
"ls": filesLsCmd,
"mkdir": filesMkdirCmd,
"stat": filesStatCmd,
"rm": filesRmCmd,
"flush": filesFlushCmd,
"chcid": filesChcidCmd,
"chmod": filesChmodCmd,
"touch": filesTouchCmd,
},
}
@ -500,12 +493,7 @@ being GC'ed.
return err
}
cfg, err := nd.Repo.Config()
if err != nil {
return err
}
prefix, err := getPrefixNew(req, &cfg.Import)
prefix, err := getPrefixNew(req)
if err != nil {
return err
}
@ -556,9 +544,7 @@ being GC'ed.
mkParents, _ := req.Options[filesParentsOptionName].(bool)
if mkParents {
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix, maxDirLinks, &sizeEstimationMode)
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix)
if err != nil {
return err
}
@ -997,13 +983,9 @@ stat' on the file or any of its ancestors.
WARNING:
The CID produced by 'files write' will be different from 'ipfs add' because
'ipfs files write' creates a trickle-dag optimized for append-only operations.
'ipfs file write' creates a trickle-dag optimized for append-only operations
See '--trickle' in 'ipfs add --help' for more information.
NOTE: The 'Import.UnixFSFileMaxLinks' config option does not apply to this command.
Trickle DAG has a fixed internal structure optimized for append operations.
To use configurable max-links, use 'ipfs add' with balanced DAG layout.
If you want to add a file without modifying an existing one,
use 'ipfs add' with '--to-files':
@ -1060,7 +1042,7 @@ See '--to-files' in 'ipfs add --help' for more information.
rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
}
prefix, err := getPrefixNew(req, &cfg.Import)
prefix, err := getPrefixNew(req)
if err != nil {
return err
}
@ -1071,9 +1053,7 @@ See '--to-files' in 'ipfs add --help' for more information.
}
if mkParents {
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix, maxDirLinks, &sizeEstimationMode)
err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix)
if err != nil {
return err
}
@ -1177,11 +1157,6 @@ Examples:
return err
}
cfg, err := n.Repo.Config()
if err != nil {
return err
}
dashp, _ := req.Options[filesParentsOptionName].(bool)
dirtomake, err := checkPath(req.Arguments[0])
if err != nil {
@ -1194,21 +1169,16 @@ Examples:
return err
}
prefix, err := getPrefix(req, &cfg.Import)
prefix, err := getPrefix(req)
if err != nil {
return err
}
root := n.FilesRoot
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{
Mkparents: dashp,
Flush: flush,
CidBuilder: prefix,
MaxLinks: maxDirLinks,
SizeEstimationMode: &sizeEstimationMode,
Mkparents: dashp,
Flush: flush,
CidBuilder: prefix,
})
return err
@ -1286,9 +1256,7 @@ Change the CID version or hash function of the root node of a given path.
flush, _ := req.Options[filesFlushOptionName].(bool)
// Note: files chcid is for explicitly changing CID format, so we don't
// fall back to Import config here. If no options are provided, it does nothing.
prefix, err := getPrefix(req, nil)
prefix, err := getPrefix(req)
if err != nil {
return err
}
@ -1446,20 +1414,10 @@ func removePath(filesRoot *mfs.Root, path string, force bool, dashr bool) error
return pdir.Flush()
}
func getPrefixNew(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) {
func getPrefixNew(req *cmds.Request) (cid.Builder, error) {
cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int)
hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string)
// Fall back to Import config if CLI options not set
if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() {
cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion))
cidVerSet = true
}
if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() {
hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction)
hashFunSet = true
}
if !cidVerSet && !hashFunSet {
return nil, nil
}
@ -1485,20 +1443,10 @@ func getPrefixNew(req *cmds.Request, importCfg *config.Import) (cid.Builder, err
return &prefix, nil
}
func getPrefix(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) {
func getPrefix(req *cmds.Request) (cid.Builder, error) {
cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int)
hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string)
// Fall back to Import config if CLI options not set
if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() {
cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion))
cidVerSet = true
}
if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() {
hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction)
hashFunSet = true
}
if !cidVerSet && !hashFunSet {
return nil, nil
}
@ -1524,7 +1472,7 @@ func getPrefix(req *cmds.Request, importCfg *config.Import) (cid.Builder, error)
return &prefix, nil
}
func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder, maxLinks int, sizeEstimationMode *uio.SizeEstimationMode) error {
func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder) error {
dirtomake := gopath.Dir(path)
if dirtomake == "/" {
@ -1532,10 +1480,8 @@ func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Build
}
return mfs.Mkdir(r, dirtomake, mfs.MkdirOpts{
Mkparents: true,
CidBuilder: builder,
MaxLinks: maxLinks,
SizeEstimationMode: sizeEstimationMode,
Mkparents: true,
CidBuilder: builder,
})
}
@ -1702,141 +1648,3 @@ Examples:
return mfs.Touch(nd.FilesRoot, path, ts)
},
}
const chrootConfirmOptionName = "confirm"
var filesChrootCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Change the MFS root CID.",
ShortDescription: `
'ipfs files chroot' changes the root CID used by MFS (Mutable File System).
This is a recovery command for when MFS becomes corrupted and prevents the
daemon from starting.
When run without a CID argument, resets MFS to an empty directory.
WARNING: The old MFS root and its unpinned children will be removed during
the next garbage collection. Pin the old root first if you want to preserve it.
This command can only run when the daemon is not running.
Examples:
# Reset MFS to empty directory (recovery from corruption)
$ ipfs files chroot --confirm
# Restore MFS to a known good directory CID
$ ipfs files chroot --confirm QmYourBackupCID
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("cid", false, false, "New root CID (defaults to empty directory if not specified)."),
},
Options: []cmds.Option{
cmds.BoolOption(chrootConfirmOptionName, "Confirm this potentially destructive operation."),
},
NoRemote: true,
Extra: CreateCmdExtras(SetDoesNotUseRepo(true)),
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
confirm, _ := req.Options[chrootConfirmOptionName].(bool)
if !confirm {
return errors.New("this is a potentially destructive operation; pass --confirm to proceed")
}
// Determine new root CID
var newRootCid cid.Cid
if len(req.Arguments) > 0 {
var err error
newRootCid, err = cid.Decode(req.Arguments[0])
if err != nil {
return fmt.Errorf("invalid CID %q: %w", req.Arguments[0], err)
}
} else {
// Default to empty directory
newRootCid = ft.EmptyDirNode().Cid()
}
// Get config root to open repo directly
cctx := env.(*oldcmds.Context)
cfgRoot := cctx.ConfigRoot
// Open repo directly (daemon must not be running)
repo, err := fsrepo.Open(cfgRoot)
if err != nil {
return fmt.Errorf("opening repo (is the daemon running?): %w", err)
}
defer repo.Close()
localDS := repo.Datastore()
bs := bstore.NewBlockstore(localDS)
// Check new root exists locally and is a directory
hasBlock, err := bs.Has(req.Context, newRootCid)
if err != nil {
return fmt.Errorf("checking if new root exists: %w", err)
}
if !hasBlock {
// Special case: empty dir is always available (hardcoded in boxo)
emptyDirCid := ft.EmptyDirNode().Cid()
if !newRootCid.Equals(emptyDirCid) {
return fmt.Errorf("new root %s does not exist locally; fetch it first with 'ipfs block get'", newRootCid)
}
}
// Validate it's a directory (not a file)
if hasBlock {
blk, err := bs.Get(req.Context, newRootCid)
if err != nil {
return fmt.Errorf("reading new root block: %w", err)
}
pbNode, err := dag.DecodeProtobuf(blk.RawData())
if err != nil {
return fmt.Errorf("new root is not a valid dag-pb node: %w", err)
}
fsNode, err := ft.FSNodeFromBytes(pbNode.Data())
if err != nil {
return fmt.Errorf("new root is not a valid UnixFS node: %w", err)
}
if fsNode.Type() != ft.TDirectory && fsNode.Type() != ft.THAMTShard {
return fmt.Errorf("new root must be a directory, got %s", fsNode.Type())
}
}
// Get old root for display (if exists)
var oldRootStr string
oldRootBytes, err := localDS.Get(req.Context, node.FilesRootDatastoreKey)
if err == nil {
oldRootCid, err := cid.Cast(oldRootBytes)
if err == nil {
oldRootStr = oldRootCid.String()
}
} else if !errors.Is(err, datastore.ErrNotFound) {
return fmt.Errorf("reading current MFS root: %w", err)
}
// Write new root
err = localDS.Put(req.Context, node.FilesRootDatastoreKey, newRootCid.Bytes())
if err != nil {
return fmt.Errorf("writing new MFS root: %w", err)
}
// Build output message
var msg string
if oldRootStr != "" {
msg = fmt.Sprintf("MFS root changed from %s to %s\n", oldRootStr, newRootCid)
msg += fmt.Sprintf("The old root %s will be garbage collected unless pinned.\n", oldRootStr)
} else {
msg = fmt.Sprintf("MFS root set to %s\n", newRootCid)
}
return cmds.EmitOnce(res, &MessageOutput{Message: msg})
},
Type: MessageOutput{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error {
_, err := fmt.Fprint(w, out.Message)
return err
}),
},
}
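
For reference, the empty-directory default mentioned above is the canonical
UnixFS empty directory hardcoded in boxo. A minimal standalone sketch that
prints its CID, assuming the same boxo import alias used in this file:

package main

import (
	"fmt"

	ft "github.com/ipfs/boxo/ipld/unixfs"
)

func main() {
	// The CID of the canonical empty UnixFS directory; this is what
	// 'ipfs files chroot --confirm' resets the MFS root to.
	fmt.Println(ft.EmptyDirNode().Cid())
}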

View File

@ -1,6 +1,7 @@
package commands
import (
"context"
"io"
"testing"
@ -11,7 +12,8 @@ import (
)
func TestFilesCp_DagCborNodeFails(t *testing.T) {
ctx := t.Context()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cmdCtx, err := coremock.MockCmdsCtx()
require.NoError(t, err)
@ -30,7 +32,7 @@ func TestFilesCp_DagCborNodeFails(t *testing.T) {
"/ipfs/" + protoNode.Cid().String(),
"/test-destination",
},
Options: map[string]any{
Options: map[string]interface{}{
"force": false,
},
}

View File

@ -85,7 +85,7 @@ The output is:
if err != nil {
return err
}
return streamResult(func(v any, out io.Writer) nonFatalError {
return streamResult(func(v interface{}, out io.Writer) nonFatalError {
r := v.(*filestore.ListRes)
if r.ErrorMsg != "" {
return nonFatalError(r.ErrorMsg)

View File

@ -45,9 +45,6 @@ To output a TAR archive instead of unpacked files, use '--archive' or '-a'.
To compress the output with GZIP compression, use '--compress' or '-C'. You
may also specify the level of compression by specifying '-l=<1-9>'.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/x-tar, or application/gzip when compress=true",
},
},
Arguments: []cmds.Argument{
@ -106,16 +103,6 @@ may also specify the level of compression by specifying '-l=<1-9>'.
reader.Close()
}()
// Set Content-Type based on output format.
// When compression is enabled, output is gzip (or tar.gz for directories).
// Otherwise, tar is used as the transport format.
res.SetEncodingType(cmds.OctetStream)
if cmplvl != gzip.NoCompression {
res.SetContentType("application/gzip")
} else {
res.SetContentType("application/x-tar")
}
return res.Emit(reader)
},
PostRun: cmds.PostRunMap{

View File

@ -1,6 +1,7 @@
package commands
import (
"context"
"fmt"
"testing"
@ -15,7 +16,7 @@ func TestGetOutputPath(t *testing.T) {
}{
{
args: []string{"/ipns/multiformats.io/"},
opts: map[string]any{
opts: map[string]interface{}{
"output": "takes-precedence",
},
outPath: "takes-precedence",
@ -51,7 +52,8 @@ func TestGetOutputPath(t *testing.T) {
for i, tc := range cases {
t.Run(fmt.Sprintf("%s-%d", t.Name(), i), func(t *testing.T) {
ctx := t.Context()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
req, err := cmds.NewRequest(ctx, []string{}, tc.opts, tc.args, nil, GetCmd)
if err != nil {

View File

@ -146,7 +146,7 @@ EXAMPLE:
Type: IdOutput{},
}
func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (any, error) {
func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{}, error) {
if p == "" {
return nil, errors.New("attempted to print nil peer")
}
@ -189,7 +189,7 @@ func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (any, error
}
// printing self is special cased as we get values differently.
func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (any, error) {
func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (interface{}, error) {
info := new(IdOutput)
info.ID = keyEnc.FormatID(node.Identity)

View File

@ -38,9 +38,9 @@ publish'.
> ipfs key gen --type=rsa --size=2048 mykey
> ipfs name publish --key=mykey QmSomeHash
'ipfs key ls' lists the available keys.
'ipfs key list' lists the available keys.
> ipfs key ls
> ipfs key list
self
mykey
`,
@ -49,8 +49,7 @@ publish'.
"gen": keyGenCmd,
"export": keyExportCmd,
"import": keyImportCmd,
"list": keyListDeprecatedCmd,
"ls": keyListCmd,
"list": keyListCmd,
"rename": keyRenameCmd,
"rm": keyRmCmd,
"rotate": keyRotateCmd,
@ -459,7 +458,7 @@ var keyListCmd = &cmds.Command{
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
keyEnc, err := ke.KeyEncoderFromString(req.Options[ke.OptionIPNSBase.Name()].(string))
if err != nil {
return fmt.Errorf("cannot get key encoder: %w", err)
return err
}
api, err := cmdenv.GetApi(env, req)
@ -469,7 +468,7 @@ var keyListCmd = &cmds.Command{
keys, err := api.Key().List(req.Context)
if err != nil {
return fmt.Errorf("listing keys failed: %w", err)
return err
}
list := make([]KeyOutput, 0, len(keys))
@ -489,17 +488,6 @@ var keyListCmd = &cmds.Command{
Type: KeyOutputList{},
}
var keyListDeprecatedCmd = &cmds.Command{
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "Deprecated: use 'ipfs key ls' instead.",
},
Options: keyListCmd.Options,
Run: keyListCmd.Run,
Encoders: keyListCmd.Encoders,
Type: keyListCmd.Type,
}
const (
keyStoreForceOptionName = "force"
)
@ -785,7 +773,7 @@ the signed payload is always prefixed with "libp2p-key signed message:".
`,
},
Options: []cmds.Option{
cmds.StringOption("key", "k", "The name of the key to use for verifying."),
cmds.StringOption("key", "k", "The name of the key to use for signing."),
cmds.StringOption("signature", "s", "Multibase-encoded signature to verify."),
ke.OptionIPNSBase,
},

View File

@ -48,7 +48,6 @@ const (
lsResolveTypeOptionName = "resolve-type"
lsSizeOptionName = "size"
lsStreamOptionName = "stream"
lsLongOptionName = "long"
)
var LsCmd = &cmds.Command{
@ -58,26 +57,7 @@ var LsCmd = &cmds.Command{
Displays the contents of an IPFS or IPNS object(s) at the given path, with
the following format:
<cid> <size> <name>
With the --long (-l) option, display optional file mode (permissions) and
modification time in a format similar to Unix 'ls -l':
<mode> <cid> <size> <mtime> <name>
Mode and mtime are optional UnixFS metadata. They are only present if the
content was imported with 'ipfs add --preserve-mode' and '--preserve-mtime'.
Without preserved metadata, both mode and mtime display '-'. Times are in UTC.
Example with --long and preserved metadata:
-rw-r--r-- QmZULkCELmmk5XNf... 1234 Jan 15 10:30 document.txt
-rwxr-xr-x QmaRGe7bVmVaLmxb... 5678 Dec 01  2023 script.sh
drwxr-xr-x QmWWEQhcLufF3qPm... - Nov 20  2023 subdir/
Example with --long without preserved metadata:
- QmZULkCELmmk5XNf... 1234 - document.txt
<link base58 hash> <link size in bytes> <link name>
The JSON output contains type information.
`,
@ -91,7 +71,6 @@ The JSON output contains type information.
cmds.BoolOption(lsResolveTypeOptionName, "Resolve linked objects to find out their types.").WithDefault(true),
cmds.BoolOption(lsSizeOptionName, "Resolve linked objects to find out their file size.").WithDefault(true),
cmds.BoolOption(lsStreamOptionName, "s", "Enable experimental streaming of directory entries as they are traversed."),
cmds.BoolOption(lsLongOptionName, "l", "Use a long listing format, showing file mode and modification time."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
@ -236,121 +215,10 @@ The JSON output contains type information.
Type: LsOutput{},
}
// formatMode converts os.FileMode to a 10-character Unix ls-style string.
//
// Format: [type][owner rwx][group rwx][other rwx]
//
// Type indicators: - (regular), d (directory), l (symlink), p (named pipe),
// s (socket), c (char device), b (block device).
//
// Special bits replace the execute position: setuid on owner (s/S),
// setgid on group (s/S), sticky on other (t/T). Lowercase when the
// underlying execute bit is also set, uppercase when not.
func formatMode(mode os.FileMode) string {
var buf [10]byte
// File type - handle all special file types like ls does
switch {
case mode&os.ModeDir != 0:
buf[0] = 'd'
case mode&os.ModeSymlink != 0:
buf[0] = 'l'
case mode&os.ModeNamedPipe != 0:
buf[0] = 'p'
case mode&os.ModeSocket != 0:
buf[0] = 's'
case mode&os.ModeDevice != 0:
if mode&os.ModeCharDevice != 0 {
buf[0] = 'c'
} else {
buf[0] = 'b'
}
default:
buf[0] = '-'
}
// Owner permissions (bits 8,7,6)
buf[1] = permBit(mode, 0400, 'r') // read
buf[2] = permBit(mode, 0200, 'w') // write
// Handle setuid bit for owner execute
if mode&os.ModeSetuid != 0 {
if mode&0100 != 0 {
buf[3] = 's'
} else {
buf[3] = 'S'
}
} else {
buf[3] = permBit(mode, 0100, 'x') // execute
}
// Group permissions (bits 5,4,3)
buf[4] = permBit(mode, 0040, 'r') // read
buf[5] = permBit(mode, 0020, 'w') // write
// Handle setgid bit for group execute
if mode&os.ModeSetgid != 0 {
if mode&0010 != 0 {
buf[6] = 's'
} else {
buf[6] = 'S'
}
} else {
buf[6] = permBit(mode, 0010, 'x') // execute
}
// Other permissions (bits 2,1,0)
buf[7] = permBit(mode, 0004, 'r') // read
buf[8] = permBit(mode, 0002, 'w') // write
// Handle sticky bit for other execute
if mode&os.ModeSticky != 0 {
if mode&0001 != 0 {
buf[9] = 't'
} else {
buf[9] = 'T'
}
} else {
buf[9] = permBit(mode, 0001, 'x') // execute
}
return string(buf[:])
}
// permBit returns the permission character if the bit is set.
func permBit(mode os.FileMode, bit os.FileMode, char byte) byte {
if mode&bit != 0 {
return char
}
return '-'
}
// formatModTime formats time.Time for display, following Unix ls conventions.
//
// Returns "-" for zero time. Otherwise returns a 12-character string:
// recent files (within 6 months) show "Jan 02 15:04",
// older or future files show "Jan 02  2006".
//
// The output uses the timezone embedded in t (UTC for IPFS metadata).
func formatModTime(t time.Time) string {
if t.IsZero() {
return "-"
}
// Format: "Jan 02 15:04" for times within the last 6 months
// Format: "Jan 02 2006" for older times (similar to ls)
now := time.Now()
sixMonthsAgo := now.AddDate(0, -6, 0)
if t.After(sixMonthsAgo) && t.Before(now.Add(24*time.Hour)) {
return t.Format("Jan 02 15:04")
}
return t.Format("Jan 02 2006")
}
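// A minimal sketch of how the two helpers above compose into the --long
// prefix (hypothetical helper, not part of this diff; same package, and the
// CID is the illustrative one from the help text):
func exampleLongLine() string {
	mode := formatMode(os.ModeDir | 0755) // "drwxr-xr-x"
	mtime := formatModTime(time.Time{})   // "-" (no preserved mtime)
	// Directories print "-" in the size column and get a trailing slash.
	return fmt.Sprintf("%s\t%s\t-\t%s\t%s/", mode, "QmWWEQhcLufF3qPm...", mtime, "subdir")
}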
func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash string, ignoreBreaks bool) string {
headers, _ := req.Options[lsHeadersOptionNameTime].(bool)
stream, _ := req.Options[lsStreamOptionName].(bool)
size, _ := req.Options[lsSizeOptionName].(bool)
long, _ := req.Options[lsLongOptionName].(bool)
// in streaming mode we can't automatically align the tabs
// so we take a best guess
var minTabWidth int
@ -374,21 +242,9 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash
fmt.Fprintf(tw, "%s:\n", object.Hash)
}
if headers {
var s string
if long {
// Long format: Mode Hash [Size] ModTime Name
if size {
s = "Mode\tHash\tSize\tModTime\tName"
} else {
s = "Mode\tHash\tModTime\tName"
}
} else {
// Standard format: Hash [Size] Name
if size {
s = "Hash\tSize\tName"
} else {
s = "Hash\tName"
}
s := "Hash\tName"
if size {
s = "Hash\tSize\tName"
}
fmt.Fprintln(tw, s)
}
@ -397,54 +253,23 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash
for _, link := range object.Links {
var s string
isDir := link.Type == unixfs.TDirectory || link.Type == unixfs.THAMTShard || link.Type == unixfs.TMetadata
if long {
// Long format: Mode Hash Size ModTime Name
var mode string
if link.Mode == 0 {
// No mode metadata preserved. Show "-" to indicate
// "not available" rather than "----------" (mode 0000).
mode = "-"
switch link.Type {
case unixfs.TDirectory, unixfs.THAMTShard, unixfs.TMetadata:
if size {
s = "%[1]s\t-\t%[3]s/\n"
} else {
mode = formatMode(link.Mode)
s = "%[1]s\t%[3]s/\n"
}
modTime := formatModTime(link.ModTime)
if isDir {
if size {
s = "%s\t%s\t-\t%s\t%s/\n"
} else {
s = "%s\t%s\t%s\t%s/\n"
}
fmt.Fprintf(tw, s, mode, link.Hash, modTime, cmdenv.EscNonPrint(link.Name))
default:
if size {
s = "%s\t%v\t%s\n"
} else {
if size {
s = "%s\t%s\t%v\t%s\t%s\n"
fmt.Fprintf(tw, s, mode, link.Hash, link.Size, modTime, cmdenv.EscNonPrint(link.Name))
} else {
s = "%s\t%s\t%s\t%s\n"
fmt.Fprintf(tw, s, mode, link.Hash, modTime, cmdenv.EscNonPrint(link.Name))
}
s = "%[1]s\t%[3]s\n"
}
} else {
// Standard format: Hash [Size] Name
switch {
case isDir:
if size {
s = "%[1]s\t-\t%[3]s/\n"
} else {
s = "%[1]s\t%[3]s/\n"
}
default:
if size {
s = "%s\t%v\t%s\n"
} else {
s = "%[1]s\t%[3]s\n"
}
}
fmt.Fprintf(tw, s, link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
}
// TODO: Print link.Mode and link.ModTime?
fmt.Fprintf(tw, s, link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
}
}
tw.Flush()

View File

@ -1,189 +0,0 @@
package commands
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestFormatMode(t *testing.T) {
t.Parallel()
tests := []struct {
name string
mode os.FileMode
expected string
}{
// File types
{
name: "regular file with rw-r--r--",
mode: 0644,
expected: "-rw-r--r--",
},
{
name: "regular file with rwxr-xr-x",
mode: 0755,
expected: "-rwxr-xr-x",
},
{
name: "regular file with no permissions",
mode: 0,
expected: "----------",
},
{
name: "regular file with full permissions",
mode: 0777,
expected: "-rwxrwxrwx",
},
{
name: "directory with rwxr-xr-x",
mode: os.ModeDir | 0755,
expected: "drwxr-xr-x",
},
{
name: "directory with rwx------",
mode: os.ModeDir | 0700,
expected: "drwx------",
},
{
name: "symlink with rwxrwxrwx",
mode: os.ModeSymlink | 0777,
expected: "lrwxrwxrwx",
},
{
name: "named pipe with rw-r--r--",
mode: os.ModeNamedPipe | 0644,
expected: "prw-r--r--",
},
{
name: "socket with rw-rw-rw-",
mode: os.ModeSocket | 0666,
expected: "srw-rw-rw-",
},
{
name: "block device with rw-rw----",
mode: os.ModeDevice | 0660,
expected: "brw-rw----",
},
{
name: "character device with rw-rw-rw-",
mode: os.ModeDevice | os.ModeCharDevice | 0666,
expected: "crw-rw-rw-",
},
// Special permission bits - setuid
{
name: "setuid with execute",
mode: os.ModeSetuid | 0755,
expected: "-rwsr-xr-x",
},
{
name: "setuid without execute",
mode: os.ModeSetuid | 0644,
expected: "-rwSr--r--",
},
// Special permission bits - setgid
{
name: "setgid with execute",
mode: os.ModeSetgid | 0755,
expected: "-rwxr-sr-x",
},
{
name: "setgid without execute",
mode: os.ModeSetgid | 0745,
expected: "-rwxr-Sr-x",
},
// Special permission bits - sticky
{
name: "sticky with execute",
mode: os.ModeSticky | 0755,
expected: "-rwxr-xr-t",
},
{
name: "sticky without execute",
mode: os.ModeSticky | 0754,
expected: "-rwxr-xr-T",
},
// Combined special bits
{
name: "setuid + setgid + sticky all with execute",
mode: os.ModeSetuid | os.ModeSetgid | os.ModeSticky | 0777,
expected: "-rwsrwsrwt",
},
{
name: "setuid + setgid + sticky none with execute",
mode: os.ModeSetuid | os.ModeSetgid | os.ModeSticky | 0666,
expected: "-rwSrwSrwT",
},
// Directory with special bits
{
name: "directory with sticky bit",
mode: os.ModeDir | os.ModeSticky | 0755,
expected: "drwxr-xr-t",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
result := formatMode(tc.mode)
assert.Equal(t, tc.expected, result)
})
}
}
func TestFormatModTime(t *testing.T) {
t.Parallel()
t.Run("zero time returns dash", func(t *testing.T) {
t.Parallel()
result := formatModTime(time.Time{})
assert.Equal(t, "-", result)
})
t.Run("old time shows year format", func(t *testing.T) {
t.Parallel()
// Use a time clearly in the past (more than 6 months ago)
oldTime := time.Date(2020, time.March, 15, 10, 30, 0, 0, time.UTC)
result := formatModTime(oldTime)
// Format: "Jan 02 2006" (note: two spaces before year)
assert.Equal(t, "Mar 15 2020", result)
})
t.Run("very old time shows year format", func(t *testing.T) {
t.Parallel()
veryOldTime := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
result := formatModTime(veryOldTime)
assert.Equal(t, "Jan 01 2000", result)
})
t.Run("future time shows year format", func(t *testing.T) {
t.Parallel()
// Times more than 24h in the future should show year format
futureTime := time.Now().AddDate(1, 0, 0)
result := formatModTime(futureTime)
// Should contain the future year
assert.Contains(t, result, " ") // two spaces before year
assert.Regexp(t, `^[A-Z][a-z]{2} \d{2} \d{4}$`, result) // matches "Mon DD YYYY"
assert.Contains(t, result, futureTime.Format("2006")) // contains the year
})
t.Run("format lengths are consistent", func(t *testing.T) {
t.Parallel()
// Both formats should produce 12-character strings for alignment
oldTime := time.Date(2020, time.March, 15, 10, 30, 0, 0, time.UTC)
oldResult := formatModTime(oldTime)
assert.Len(t, oldResult, 12, "old time format should be 12 chars")
// Recent time: use 1 month ago to ensure it's always within the 6-month window
recentTime := time.Now().AddDate(0, -1, 0)
recentResult := formatModTime(recentTime)
assert.Len(t, recentResult, 12, "recent time format should be 12 chars")
})
}

View File

@ -3,18 +3,15 @@ package name
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
"github.com/ipfs/boxo/ipns"
ipns_pb "github.com/ipfs/boxo/ipns/pb"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/coreiface/options"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"google.golang.org/protobuf/proto"
)
@ -45,30 +42,29 @@ Examples:
Publish an <ipfs-path> with your default name:
> ipfs name publish /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
Published to k51qzi5uqu5dgklc20hksmmzhoy5lfrn5xcnryq6xp4r50b5yc0vnivpywfu9p: /ipfs/bafk...
> ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Publish an <ipfs-path> with another name, added by an 'ipfs key' command:
> ipfs key gen --type=ed25519 mykey
k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
> ipfs name publish --key=mykey /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
Published to k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr: /ipfs/bafk...
> ipfs key gen --type=rsa --size=2048 mykey
> ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Resolve the value of your name:
> ipfs name resolve
/ipfs/bafk...
/ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
Resolve the value of another name:
> ipfs name resolve k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
/ipfs/bafk...
> ipfs name resolve QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
/ipfs/QmSiTko9JZyabH56y2fussEt1A5oDqsFXB3CkvAqraFryz
Resolve the value of a dnslink:
> ipfs name resolve specs.ipfs.tech
/ipfs/bafy...
> ipfs name resolve ipfs.io
/ipfs/QmaBvfZooxWkrv7D3r8LS9moNjzD2o525XMZze69hhoxf5
`,
},
@ -78,8 +74,6 @@ Resolve the value of a dnslink:
"resolve": IpnsCmd,
"pubsub": IpnsPubsubCmd,
"inspect": IpnsInspectCmd,
"get": IpnsGetCmd,
"put": IpnsPutCmd,
},
}
@ -129,9 +123,6 @@ in Multibase. The Data field is DAG-CBOR represented as DAG-JSON.
Passing --verify will verify signature against provided public key.
`,
HTTP: &cmds.HTTPHelpText{
Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
},
},
Arguments: []cmds.Argument{
cmds.FileArg("record", true, false, "The IPNS record payload to be verified.").EnableStdin(),
@ -234,7 +225,7 @@ Passing --verify will verify signature against provided public key.
}
if out.Entry.ValidityType != nil {
fmt.Fprintf(tw, "Validity Type:\t%d\n", *out.Entry.ValidityType)
fmt.Fprintf(tw, "Validity Type:\t%q\n", *out.Entry.ValidityType)
}
if out.Entry.Validity != nil {
@ -276,292 +267,3 @@ Passing --verify will verify signature against provided public key.
}),
},
}
var IpnsGetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Retrieve a signed IPNS record.",
ShortDescription: `
Retrieves the signed IPNS record for a given name from the routing system.
The output is the raw IPNS record (protobuf) as defined in the IPNS spec:
https://specs.ipfs.tech/ipns/ipns-record/
The record can be inspected with 'ipfs name inspect':
ipfs name get <name> | ipfs name inspect
This is equivalent to 'ipfs routing get /ipns/<name>' but only accepts
IPNS names (not arbitrary routing keys).
Note: The routing system returns the "best" IPNS record it knows about.
For IPNS, "best" means the record with the highest sequence number.
If multiple records exist (e.g., after using 'ipfs name put'), this command
returns the one the routing system considers most current.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipfs.ipns-record",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("name", true, false, "The IPNS name to look up."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
}
// Normalize the argument: accept both "k51..." and "/ipns/k51..."
name := req.Arguments[0]
if !strings.HasPrefix(name, "/ipns/") {
name = "/ipns/" + name
}
data, err := api.Routing().Get(req.Context, name)
if err != nil {
return err
}
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipfs.ipns-record")
return res.Emit(bytes.NewReader(data))
},
}
const (
forceOptionName = "force"
putAllowOfflineOption = "allow-offline"
allowDelegatedOption = "allow-delegated"
putQuietOptionName = "quiet"
maxIPNSRecordSize = 10 << 10 // 10 KiB per IPNS spec
)
var errPutAllowOffline = errors.New("can't put while offline: pass `--allow-offline` to store locally or `--allow-delegated` if Ipns.DelegatedPublishers are set up")
var IpnsPutCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Store a pre-signed IPNS record in the routing system.",
ShortDescription: `
Stores a pre-signed IPNS record in the routing system.
This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
https://specs.ipfs.tech/ipns/ipns-record/
The record must be signed by the private key corresponding to the IPNS name.
Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine them.
`,
LongDescription: `
Stores a pre-signed IPNS record in the routing system.
This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
https://specs.ipfs.tech/ipns/ipns-record/
The record must be signed by the private key corresponding to the IPNS name.
Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine them.
Use Cases:
- Re-publishing third-party records: store someone else's signed record
- Cross-node sync: import records exported from another node
- Backup/restore: export with 'name get', restore with 'name put'
Validation:
By default, the command validates that:
- The record is a valid IPNS record (protobuf)
- The record size is within the 10 KiB limit
- The signature matches the provided IPNS name
- The record's sequence number is higher than any existing record
(identical records are allowed for republishing)
The --force flag skips this command's validation and passes the record
directly to the routing system. Note that --force only affects this command;
it does not control how the routing system handles the record. The routing
system may still reject invalid records or prefer records with higher sequence
numbers. Use --force primarily for testing (e.g., to observe how the routing
system reacts to incorrectly signed or malformed records).
Important: Even after a successful 'name put', a subsequent 'name get' may
return a different record if one with a higher sequence number exists.
This is expected IPNS behavior, not a bug.
Publishing Modes:
By default, IPNS records are published to both the DHT and any configured
HTTP delegated publishers. You can control this behavior with:
--allow-offline Store locally without requiring network connectivity
--allow-delegated Publish via HTTP delegated publishers only (no DHT)
Examples:
Export and re-import a record:
> ipfs name get k51... > record.bin
> ipfs name put k51... record.bin
Store a record received from someone else:
> ipfs name put k51... third-party-record.bin
Force store a record to test routing validation:
> ipfs name put --force k51... possibly-invalid-record.bin
`,
HTTP: &cmds.HTTPHelpText{
Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("name", true, false, "The IPNS name to store the record for (e.g., k51... or /ipns/k51...)."),
cmds.FileArg("record", true, false, "Path to file containing the signed IPNS record.").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(forceOptionName, "f", "Skip validation (signature, sequence, size)."),
cmds.BoolOption(putAllowOfflineOption, "Store locally without broadcasting to the network."),
cmds.BoolOption(allowDelegatedOption, "Publish via HTTP delegated publishers only (no DHT)."),
cmds.BoolOption(putQuietOptionName, "q", "Write no output."),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
}
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err
}
// Parse options
force, _ := req.Options[forceOptionName].(bool)
allowOffline, _ := req.Options[putAllowOfflineOption].(bool)
allowDelegated, _ := req.Options[allowDelegatedOption].(bool)
// Validate flag combinations
if allowOffline && allowDelegated {
return errors.New("cannot use both --allow-offline and --allow-delegated flags")
}
// Handle different publishing modes
if allowDelegated {
// AllowDelegated mode: check if delegated publishers are configured
cfg, err := nd.Repo.Config()
if err != nil {
return fmt.Errorf("failed to read config: %w", err)
}
delegatedPublishers := cfg.DelegatedPublishersWithAutoConf()
if len(delegatedPublishers) == 0 {
return errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing")
}
// For allow-delegated mode, we proceed even if offline
// since we're using HTTP publishing via delegated publishers
}
// Parse the IPNS name argument
nameArg := req.Arguments[0]
if !strings.HasPrefix(nameArg, "/ipns/") {
nameArg = "/ipns/" + nameArg
}
// Extract the name part after /ipns/
namePart := strings.TrimPrefix(nameArg, "/ipns/")
name, err := ipns.NameFromString(namePart)
if err != nil {
return fmt.Errorf("invalid IPNS name: %w", err)
}
// Read raw record bytes from file/stdin
file, err := cmdenv.GetFileArg(req.Files.Entries())
if err != nil {
return err
}
defer file.Close()
// Read record data (limit to 1 MiB for memory safety)
data, err := io.ReadAll(io.LimitReader(file, 1<<20))
if err != nil {
return fmt.Errorf("failed to read record: %w", err)
}
if len(data) == 0 {
return errors.New("record is empty")
}
// Validate unless --force
if !force {
// Check size limit per IPNS spec
if len(data) > maxIPNSRecordSize {
return fmt.Errorf("record exceeds maximum size of %d bytes, use --force to skip size check", maxIPNSRecordSize)
}
rec, err := ipns.UnmarshalRecord(data)
if err != nil {
return fmt.Errorf("invalid IPNS record: %w", err)
}
// Validate signature against provided name
err = ipns.ValidateWithName(rec, name)
if err != nil {
return fmt.Errorf("record validation failed: %w", err)
}
// Check for sequence conflicts with existing record
existingData, err := api.Routing().Get(req.Context, nameArg)
if err == nil {
// Allow republishing the exact same record (common use case:
// get a third-party record and put it back to refresh DHT)
if !bytes.Equal(existingData, data) {
existingRec, parseErr := ipns.UnmarshalRecord(existingData)
if parseErr == nil {
existingSeq, seqErr := existingRec.Sequence()
newSeq, newSeqErr := rec.Sequence()
if seqErr == nil && newSeqErr == nil && existingSeq >= newSeq {
return fmt.Errorf("existing IPNS record has sequence %d >= new record sequence %d, use 'ipfs name put --force' to skip this check", existingSeq, newSeq)
}
}
}
}
// If Get fails (no existing record), that's fine - proceed with put
}
// Publish the original bytes as-is
// When allowDelegated is true, we set allowOffline to allow the operation
// even without DHT connectivity (delegated publishers use HTTP)
opts := []options.RoutingPutOption{
options.Routing.AllowOffline(allowOffline || allowDelegated),
}
err = api.Routing().Put(req.Context, nameArg, data, opts...)
if err != nil {
if err.Error() == "can't put while offline" {
return errPutAllowOffline
}
return err
}
// Extract value from the record for the response
value := ""
if rec, err := ipns.UnmarshalRecord(data); err == nil {
if v, err := rec.Value(); err == nil {
value = v.String()
}
}
return cmds.EmitOnce(res, &IpnsEntry{
Name: name.String(),
Value: value,
})
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, ie *IpnsEntry) error {
quiet, _ := req.Options[putQuietOptionName].(bool)
if quiet {
return nil
}
_, err := fmt.Fprintln(w, cmdenv.EscNonPrint(ie.Name))
return err
}),
},
Type: IpnsEntry{},
}
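
Outside kubo, the validation performed by the Run handler above reduces to a
few boxo calls. A standalone sketch, assuming a record previously saved with
'ipfs name get' (the file name and IPNS name below are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/ipfs/boxo/ipns"
)

func main() {
	data, err := os.ReadFile("record.bin") // e.g. saved via 'ipfs name get'
	if err != nil {
		panic(err)
	}
	name, err := ipns.NameFromString("k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr")
	if err != nil {
		panic(err)
	}
	rec, err := ipns.UnmarshalRecord(data) // parse the protobuf envelope
	if err != nil {
		panic(err)
	}
	// Same check 'ipfs name put' runs by default: signature must match the name.
	if err := ipns.ValidateWithName(rec, name); err != nil {
		panic(err)
	}
	seq, _ := rec.Sequence()
	fmt.Println("valid record, sequence:", seq)
}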

View File

@ -50,17 +50,9 @@ type P2PStreamsOutput struct {
Streams []P2PStreamInfoOutput
}
// P2PForegroundOutput is output type for foreground mode status messages
type P2PForegroundOutput struct {
Status string // "active" or "closing"
Protocol string
Address string
}
const (
allowCustomProtocolOptionName = "allow-custom-protocol"
reportPeerIDOptionName = "report-peer-id"
foregroundOptionName = "foreground"
)
var resolveTimeout = 10 * time.Second
@ -91,37 +83,15 @@ var p2pForwardCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Forward connections to libp2p service.",
ShortDescription: `
Forward connections made to <listen-address> to <target-address> via libp2p.
Forward connections made to <listen-address> to <target-address>.
Creates a local TCP listener that tunnels connections through libp2p to a
remote peer's p2p listener. Similar to SSH port forwarding (-L flag).
<protocol> specifies the libp2p protocol name to use for libp2p
connections and/or handlers. It must be prefixed with '` + P2PProtoPrefix + `'.
ARGUMENTS:
Example:
ipfs p2p forward ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/4567 /p2p/QmPeer
- Forward connections to 127.0.0.1:4567 to '` + P2PProtoPrefix + `myproto' service on /p2p/QmPeer
<protocol> Protocol name (must start with '` + P2PProtoPrefix + `')
<listen-address> Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000)
<target-address> Remote peer multiaddr (e.g., /p2p/PeerID)
FOREGROUND MODE (--foreground, -f):
By default, the forwarder runs in the daemon and the command returns
immediately. Use --foreground to block until interrupted:
- Ctrl+C or SIGTERM: Removes the forwarder and exits
- 'ipfs p2p close': Removes the forwarder and exits
- Daemon shutdown: Forwarder is automatically removed
Useful for systemd services or scripts that need cleanup on exit.
EXAMPLES:
# Persistent forwarder (command returns immediately)
ipfs p2p forward /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID
# Temporary forwarder (removed when command exits)
ipfs p2p forward -f /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID
Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
`,
},
Arguments: []cmds.Argument{
@ -131,7 +101,6 @@ Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
},
Options: []cmds.Option{
cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"),
cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; forwarder is removed when command exits"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := p2pGetNode(env)
@ -161,51 +130,7 @@ Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace")
}
listener, err := forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets)
if err != nil {
return err
}
foreground, _ := req.Options[foregroundOptionName].(bool)
if foreground {
if err := res.Emit(&P2PForegroundOutput{
Status: "active",
Protocol: protoOpt,
Address: listenOpt,
}); err != nil {
return err
}
// Wait for either context cancellation (Ctrl+C/daemon shutdown)
// or listener removal (ipfs p2p close)
select {
case <-req.Context.Done():
// SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing)
n.P2P.ListenersLocal.Close(func(l p2p.Listener) bool {
return l == listener
})
return nil
case <-listener.Done():
// Closed via "ipfs p2p close" - emit closing message
return res.Emit(&P2PForegroundOutput{
Status: "closing",
Protocol: protoOpt,
Address: listenOpt,
})
}
}
return nil
},
Type: P2PForegroundOutput{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error {
if out.Status == "active" {
fmt.Fprintf(w, "Forwarding %s to %s, waiting for interrupt...\n", out.Protocol, out.Address)
} else if out.Status == "closing" {
fmt.Fprintf(w, "Received interrupt, removing forwarder for %s\n", out.Protocol)
}
return nil
}),
return forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets)
},
}
@ -260,40 +185,14 @@ var p2pListenCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Create libp2p service.",
ShortDescription: `
Create a libp2p protocol handler that forwards incoming connections to
<target-address>.
Create libp2p service and forward connections made to <target-address>.
When a remote peer connects using 'ipfs p2p forward', the connection is
forwarded to your local service. Similar to SSH port forwarding (server side).
<protocol> specifies the libp2p handler name. It must be prefixed with '` + P2PProtoPrefix + `'.
ARGUMENTS:
Example:
ipfs p2p listen ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/1234
- Forward connections to 'myproto' libp2p service to 127.0.0.1:1234
<protocol> Protocol name (must start with '` + P2PProtoPrefix + `')
<target-address> Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000)
FOREGROUND MODE (--foreground, -f):
By default, the listener runs in the daemon and the command returns
immediately. Use --foreground to block until interrupted:
- Ctrl+C or SIGTERM: Removes the listener and exits
- 'ipfs p2p close': Removes the listener and exits
- Daemon shutdown: Listener is automatically removed
Useful for systemd services or scripts that need cleanup on exit.
EXAMPLES:
# Persistent listener (command returns immediately)
ipfs p2p listen /x/myapp /ip4/127.0.0.1/tcp/3000
# Temporary listener (removed when command exits)
ipfs p2p listen -f /x/myapp /ip4/127.0.0.1/tcp/3000
# Report connecting peer ID to the target application
ipfs p2p listen -r /x/myapp /ip4/127.0.0.1/tcp/3000
Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
`,
},
Arguments: []cmds.Argument{
@ -303,7 +202,6 @@ Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
Options: []cmds.Option{
cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"),
cmds.BoolOption(reportPeerIDOptionName, "r", "Send remote base58 peerid to target when a new connection is established"),
cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; listener is removed when command exits"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := p2pGetNode(env)
@ -333,51 +231,8 @@ Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace")
}
listener, err := n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID)
if err != nil {
return err
}
foreground, _ := req.Options[foregroundOptionName].(bool)
if foreground {
if err := res.Emit(&P2PForegroundOutput{
Status: "active",
Protocol: protoOpt,
Address: targetOpt,
}); err != nil {
return err
}
// Wait for either context cancellation (Ctrl+C/daemon shutdown)
// or listener removal (ipfs p2p close)
select {
case <-req.Context.Done():
// SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing)
n.P2P.ListenersP2P.Close(func(l p2p.Listener) bool {
return l == listener
})
return nil
case <-listener.Done():
// Closed via "ipfs p2p close" - emit closing message
return res.Emit(&P2PForegroundOutput{
Status: "closing",
Protocol: protoOpt,
Address: targetOpt,
})
}
}
return nil
},
Type: P2PForegroundOutput{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error {
if out.Status == "active" {
fmt.Fprintf(w, "Listening on %s, forwarding to %s, waiting for interrupt...\n", out.Protocol, out.Address)
} else if out.Status == "closing" {
fmt.Fprintf(w, "Received interrupt, removing listener for %s\n", out.Protocol)
}
return nil
}),
_, err = n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID)
return err
},
}
@ -416,9 +271,11 @@ func checkPort(target ma.Multiaddr) error {
}
// forwardLocal forwards local connections to a libp2p service
func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) (p2p.Listener, error) {
func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) error {
ps.AddAddrs(addr.ID, addr.Addrs, pstore.TempAddrTTL)
return p.ForwardLocal(ctx, addr.ID, proto, bindAddr)
// TODO: return some info
_, err := p.ForwardLocal(ctx, addr.ID, proto, bindAddr)
return err
}
const (

View File

@ -8,7 +8,6 @@ import (
"os"
"time"
"github.com/dustin/go-humanize"
bserv "github.com/ipfs/boxo/blockservice"
offline "github.com/ipfs/boxo/exchange/offline"
dag "github.com/ipfs/boxo/ipld/merkledag"
@ -48,7 +47,6 @@ type PinOutput struct {
type AddPinOutput struct {
Pins []string `json:",omitempty"`
Progress int `json:",omitempty"`
Bytes uint64 `json:",omitempty"`
}
const (
@ -149,15 +147,14 @@ It may take some time. Pass '--progress' to track the progress.
return val.err
}
if ps := v.ProgressStat(); ps.Nodes != 0 {
if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil {
if pv := v.Value(); pv != 0 {
if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil {
return err
}
}
return res.Emit(&AddPinOutput{Pins: val.pins})
case <-ticker.C:
ps := v.ProgressStat()
if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil {
if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil {
return err
}
case <-ctx.Done():
@ -200,7 +197,7 @@ It may take some time. Pass '--progress' to track the progress.
}
if out.Pins == nil {
// this can only happen if the progress option is set
fmt.Fprintf(os.Stderr, "Fetched/Processed %d nodes (%s)\r", out.Progress, humanize.Bytes(out.Bytes))
fmt.Fprintf(os.Stderr, "Fetched/Processed %d nodes\r", out.Progress)
} else {
err = re.Emit(out)
if err != nil {

View File

@ -112,7 +112,7 @@ trip latency information.
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range numPings {
for i := 0; i < numPings; i++ {
r, ok := <-pings
if !ok {
break

View File

@ -70,9 +70,6 @@ However, it could reveal:
- Memory offsets of various data structures.
- Any modifications you've made to go-ipfs.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/zip",
},
},
NoLocal: true,
Options: []cmds.Option{
@ -124,8 +121,6 @@ However, it could reveal:
archive.Close()
_ = w.CloseWithError(err)
}()
res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/zip")
return res.Emit(r)
},
PostRun: cmds.PostRunMap{

View File

@ -351,7 +351,8 @@ NOTES:
}
sectionTitle := func(col int, title string) {
if !brief && showHeadings {
formatLine(col, "%s:", title)
//nolint:govet // dynamic format string is intentional
formatLine(col, title+":")
}
}

View File

@ -8,35 +8,26 @@ import (
"net/http"
"slices"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
options "github.com/ipfs/kubo/core/coreiface/options"
"github.com/ipfs/kubo/core/node/libp2p"
"github.com/libp2p/go-libp2p/core/peer"
mbase "github.com/multiformats/go-multibase"
cmds "github.com/ipfs/go-ipfs-cmds"
options "github.com/ipfs/kubo/core/coreiface/options"
)
var PubsubCmd = &cmds.Command{
Status: cmds.Experimental,
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "An experimental publish-subscribe system on ipfs.",
ShortDescription: `
ipfs pubsub allows you to publish messages to a given topic, and also to
subscribe to new messages on a given topic.
EXPERIMENTAL FEATURE
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
The default message validator is designed for IPNS record protocol.
For custom pubsub applications requiring different validation logic,
use go-libp2p-pubsub (https://github.com/libp2p/go-libp2p-pubsub)
directly in a dedicated binary.
To enable, set 'Pubsub.Enabled' config to true.
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
`,
},
Subcommands: map[string]*cmds.Command{
@ -44,7 +35,6 @@ EXPERIMENTAL FEATURE
"sub": PubsubSubCmd,
"ls": PubsubLsCmd,
"peers": PubsubPeersCmd,
"reset": PubsubResetCmd,
},
}
@ -56,18 +46,17 @@ type pubsubMessage struct {
}
var PubsubSubCmd = &cmds.Command{
Status: cmds.Experimental,
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "Subscribe to messages on a given topic.",
ShortDescription: `
ipfs pubsub sub subscribes to messages on a given topic.
EXPERIMENTAL FEATURE
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
PEER ENCODING
@ -156,19 +145,18 @@ TOPIC AND DATA ENCODING
}
var PubsubPubCmd = &cmds.Command{
Status: cmds.Experimental,
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "Publish data to a given pubsub topic.",
ShortDescription: `
ipfs pubsub pub publishes a message to a specified topic.
It reads binary data from stdin or a file.
EXPERIMENTAL FEATURE
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
HTTP RPC ENCODING
@ -213,18 +201,17 @@ HTTP RPC ENCODING
}
var PubsubLsCmd = &cmds.Command{
Status: cmds.Experimental,
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "List subscribed topics by name.",
ShortDescription: `
ipfs pubsub ls lists out the names of topics you are currently subscribed to.
EXPERIMENTAL FEATURE
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
TOPIC ENCODING
@ -286,7 +273,7 @@ func safeTextListEncoder(req *cmds.Request, w io.Writer, list *stringList) error
}
var PubsubPeersCmd = &cmds.Command{
Status: cmds.Experimental,
Status: cmds.Deprecated,
Helptext: cmds.HelpText{
Tagline: "List peers we are currently pubsubbing with.",
ShortDescription: `
@ -294,12 +281,11 @@ ipfs pubsub peers with no arguments lists out the pubsub peers you are
currently connected to. If given a topic, it will list connected peers who are
subscribed to the named topic.
EXPERIMENTAL FEATURE
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
This is an opt-in feature optimized for IPNS over PubSub
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
To enable, set 'Pubsub.Enabled' config to true.
It is not intended in its current state to be used in a production
environment. To use, the daemon must be run with
'--enable-pubsub-experiment'.
TOPIC AND DATA ENCODING
@ -381,122 +367,3 @@ func urlArgsDecoder(req *cmds.Request, env cmds.Environment) error {
}
return nil
}
type pubsubResetResult struct {
Deleted int64 `json:"deleted"`
}
var PubsubResetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Reset pubsub validator state.",
ShortDescription: `
Clears persistent sequence number state used by the pubsub validator.
WARNING: FOR TESTING ONLY - DO NOT USE IN PRODUCTION
Resets validator state that protects against replay attacks. After reset,
previously seen messages may be accepted again until their sequence numbers
are re-learned.
Use cases:
- Testing pubsub functionality
- Recovery from a peer sending artificially high sequence numbers
(which would cause subsequent messages from that peer to be rejected)
The --peer flag limits the reset to a specific peer's state.
Without --peer, all validator state is cleared.
NOTE: This only resets the persistent seqno validator state. The in-memory
seen messages cache (Pubsub.SeenMessagesTTL) auto-expires and can only be
fully cleared by restarting the daemon.
`,
},
Options: []cmds.Option{
cmds.StringOption(peerOptionName, "p", "Only reset state for this peer ID"),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
n, err := cmdenv.GetNode(env)
if err != nil {
return err
}
ds := n.Repo.Datastore()
ctx := req.Context
peerOpt, _ := req.Options[peerOptionName].(string)
var deleted int64
if peerOpt != "" {
// Reset specific peer
pid, err := peer.Decode(peerOpt)
if err != nil {
return fmt.Errorf("invalid peer ID: %w", err)
}
key := datastore.NewKey(libp2p.SeqnoStorePrefix + pid.String())
exists, err := ds.Has(ctx, key)
if err != nil {
return fmt.Errorf("failed to check seqno state: %w", err)
}
if exists {
if err := ds.Delete(ctx, key); err != nil {
return fmt.Errorf("failed to delete seqno state: %w", err)
}
deleted = 1
}
} else {
// Reset all peers using batched delete for efficiency
q := query.Query{
Prefix: libp2p.SeqnoStorePrefix,
KeysOnly: true,
}
results, err := ds.Query(ctx, q)
if err != nil {
return fmt.Errorf("failed to query seqno state: %w", err)
}
defer results.Close()
batch, err := ds.Batch(ctx)
if err != nil {
return fmt.Errorf("failed to create batch: %w", err)
}
for result := range results.Next() {
if result.Error != nil {
return fmt.Errorf("query error: %w", result.Error)
}
if err := batch.Delete(ctx, datastore.NewKey(result.Key)); err != nil {
return fmt.Errorf("failed to batch delete key %s: %w", result.Key, err)
}
deleted++
}
if err := batch.Commit(ctx); err != nil {
return fmt.Errorf("failed to commit batch delete: %w", err)
}
}
// Sync to ensure deletions are persisted
if err := ds.Sync(ctx, datastore.NewKey(libp2p.SeqnoStorePrefix)); err != nil {
return fmt.Errorf("failed to sync datastore: %w", err)
}
return cmds.EmitOnce(res, &pubsubResetResult{Deleted: deleted})
},
Type: pubsubResetResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *pubsubResetResult) error {
peerOpt, _ := req.Options[peerOptionName].(string)
if peerOpt != "" {
if result.Deleted == 0 {
_, err := fmt.Fprintf(w, "No validator state found for peer %s\n", peerOpt)
return err
}
_, err := fmt.Fprintf(w, "Reset validator state for peer %s\n", peerOpt)
return err
}
_, err := fmt.Fprintf(w, "Reset validator state for %d peer(s)\n", result.Deleted)
return err
}),
},
}

View File

@ -11,13 +11,11 @@ import (
"github.com/ipfs/kubo/config"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"
"github.com/ipfs/kubo/core/node"
mh "github.com/multiformats/go-multihash"
dag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipns"
"github.com/ipfs/boxo/provider"
cid "github.com/ipfs/go-cid"
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
@ -91,7 +89,7 @@ var findProvidersRoutingCmd = &cmds.Command{
defer cancel()
pchan := n.Routing.FindProvidersAsync(ctx, c, numProviders)
for p := range pchan {
np := cmdutils.CloneAddrInfo(p)
np := p
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
Type: routing.Provider,
Responses: []*peer.AddrInfo{&np},
@ -297,9 +295,9 @@ Trigger reprovider to announce our data to network.
if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'")
}
provideSys, ok := nd.Provider.(provider.Reprovider)
provideSys, ok := nd.Provider.(*node.LegacyProvider)
if !ok {
return errors.New("manual reprovide only available with legacy provider (Provide.DHT.SweepEnabled=false)")
return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
}
err = provideSys.Reprovide(req.Context)

View File

@ -435,7 +435,7 @@ type connInfo struct {
Muxer string `json:",omitempty"`
Direction inet.Direction `json:",omitempty"`
Streams []streamInfo `json:",omitempty"`
Identify IdOutput
Identify IdOutput `json:",omitempty"`
}
func (ci *connInfo) Sort() {
@ -513,9 +513,8 @@ var swarmAddrsCmd = &cmds.Command{
`,
},
Subcommands: map[string]*cmds.Command{
"autonat": swarmAddrsAutoNATCmd,
"local": swarmAddrsLocalCmd,
"listen": swarmAddrsListenCmd,
"local": swarmAddrsLocalCmd,
"listen": swarmAddrsListenCmd,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)

View File

@ -1,139 +0,0 @@
package commands
import (
"fmt"
"io"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/libp2p/go-libp2p/core/network"
ma "github.com/multiformats/go-multiaddr"
)
// reachabilityHost provides access to the AutoNAT reachability status.
type reachabilityHost interface {
Reachability() network.Reachability
}
// confirmedAddrsHost provides access to per-address reachability from AutoNAT V2.
type confirmedAddrsHost interface {
ConfirmedAddrs() (reachable, unreachable, unknown []ma.Multiaddr)
}
// autoNATResult represents the AutoNAT reachability information.
type autoNATResult struct {
Reachability string `json:"reachability"`
Reachable []string `json:"reachable,omitempty"`
Unreachable []string `json:"unreachable,omitempty"`
Unknown []string `json:"unknown,omitempty"`
}
func multiaddrsToStrings(addrs []ma.Multiaddr) []string {
out := make([]string, len(addrs))
for i, a := range addrs {
out[i] = a.String()
}
return out
}
func writeAddrSection(w io.Writer, label string, addrs []string) {
if len(addrs) > 0 {
fmt.Fprintf(w, " %s:\n", label)
for _, addr := range addrs {
fmt.Fprintf(w, " %s\n", addr)
}
}
}
var swarmAddrsAutoNATCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Show address reachability as determined by AutoNAT V2.",
ShortDescription: `
'ipfs swarm addrs autonat' shows the reachability status of your node's
addresses as determined by AutoNAT V2.
`,
LongDescription: `
'ipfs swarm addrs autonat' shows the reachability status of your node's
addresses as verified by AutoNAT V2.
AutoNAT V2 probes your node's addresses to determine if they are reachable
from the public internet. This helps determine whether other peers can
dial your node directly.
The output shows:
- Reachability: Overall status (Public, Private, or Unknown)
- Reachable: Addresses confirmed to be publicly reachable
- Unreachable: Addresses that failed reachability checks
- Unknown: Addresses that haven't been tested yet
For more information on AutoNAT V2, see:
https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md
Example:
> ipfs swarm addrs autonat
AutoNAT V2 Status:
Reachability: Public
Per-Address Reachability:
Reachable:
/ip4/203.0.113.42/tcp/4001
/ip4/203.0.113.42/udp/4001/quic-v1
Unreachable:
/ip6/2001:db8::1/tcp/4001
Unknown:
/ip4/203.0.113.42/udp/4001/webrtc-direct
`,
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
nd, err := cmdenv.GetNode(env)
if err != nil {
return err
}
if !nd.IsOnline {
return ErrNotOnline
}
result := autoNATResult{
Reachability: network.ReachabilityUnknown.String(),
}
// Get per-address reachability from AutoNAT V2.
// The host embeds *BasicHost (closableBasicHost, closableRoutedHost)
// which implements ConfirmedAddrs.
if h, ok := nd.PeerHost.(confirmedAddrsHost); ok {
reachable, unreachable, unknown := h.ConfirmedAddrs()
result.Reachable = multiaddrsToStrings(reachable)
result.Unreachable = multiaddrsToStrings(unreachable)
result.Unknown = multiaddrsToStrings(unknown)
}
// Get overall reachability status.
if h, ok := nd.PeerHost.(reachabilityHost); ok {
result.Reachability = h.Reachability().String()
}
return cmds.EmitOnce(res, result)
},
Type: autoNATResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result autoNATResult) error {
fmt.Fprintln(w, "AutoNAT V2 Status:")
fmt.Fprintf(w, " Reachability: %s\n", result.Reachability)
fmt.Fprintln(w)
fmt.Fprintln(w, "Per-Address Reachability:")
writeAddrSection(w, "Reachable", result.Reachable)
writeAddrSection(w, "Unreachable", result.Unreachable)
writeAddrSection(w, "Unknown", result.Unknown)
if len(result.Reachable) == 0 && len(result.Unreachable) == 0 && len(result.Unknown) == 0 {
fmt.Fprintln(w, " (no address reachability data available)")
}
return nil
}),
},
}
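// The two single-method interfaces above follow the usual pattern for optional
// libp2p host capabilities: assert the interface on the concrete host and
// degrade gracefully when it is absent. A minimal sketch (hypothetical helper,
// not part of this diff):
func probeReachability(h any) string {
	if r, ok := h.(reachabilityHost); ok {
		return r.Reachability().String()
	}
	return network.ReachabilityUnknown.String()
}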

View File

@ -34,8 +34,8 @@ Prints out information about your computer to aid in easier debugging.
},
}
func getInfo(nd *core.IpfsNode) (map[string]any, error) {
info := make(map[string]any)
func getInfo(nd *core.IpfsNode) (map[string]interface{}, error) {
info := make(map[string]interface{})
err := runtimeInfo(info)
if err != nil {
return nil, err
@ -66,8 +66,8 @@ func getInfo(nd *core.IpfsNode) (map[string]any, error) {
return info, nil
}
func runtimeInfo(out map[string]any) error {
rt := make(map[string]any)
func runtimeInfo(out map[string]interface{}) error {
rt := make(map[string]interface{})
rt["os"] = runtime.GOOS
rt["arch"] = runtime.GOARCH
rt["compiler"] = runtime.Compiler
@ -80,8 +80,8 @@ func runtimeInfo(out map[string]any) error {
return nil
}
func envVarInfo(out map[string]any) error {
ev := make(map[string]any)
func envVarInfo(out map[string]interface{}) error {
ev := make(map[string]interface{})
ev["GOPATH"] = os.Getenv("GOPATH")
ev[config.EnvDir] = os.Getenv(config.EnvDir)
@ -89,7 +89,7 @@ func envVarInfo(out map[string]any) error {
return nil
}
func diskSpaceInfo(out map[string]any) error {
func diskSpaceInfo(out map[string]interface{}) error {
pathRoot, err := config.PathRoot()
if err != nil {
return err
@ -99,7 +99,7 @@ func diskSpaceInfo(out map[string]any) error {
return err
}
out["diskinfo"] = map[string]any{
out["diskinfo"] = map[string]interface{}{
"fstype": dinfo.FsType,
"total_space": dinfo.Total,
"free_space": dinfo.Free,
@ -108,8 +108,8 @@ func diskSpaceInfo(out map[string]any) error {
return nil
}
func memInfo(out map[string]any) error {
m := make(map[string]any)
func memInfo(out map[string]interface{}) error {
m := make(map[string]interface{})
meminf, err := sysi.MemoryInfo()
if err != nil {
@ -122,8 +122,8 @@ func memInfo(out map[string]any) error {
return nil
}
func netInfo(online bool, out map[string]any) error {
n := make(map[string]any)
func netInfo(online bool, out map[string]interface{}) error {
n := make(map[string]interface{})
addrs, err := manet.InterfaceMultiaddrs()
if err != nil {
return err
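
Every hunk in this file is the same mechanical rollback: the Go 1.18 alias any on master becomes the spelled-out interface{} on v0.39.0-rc1. The two spellings denote the identical type, as this small program shows:

package main

import "fmt"

func main() {
	// Since Go 1.18, any is a type alias for interface{};
	// a and b below have exactly the same type.
	var a any = 42
	var b interface{} = 42
	fmt.Println(a == b) // true
}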

View File

@ -29,7 +29,7 @@ type key struct {
func newKey(name string, pid peer.ID) (*key, error) {
p, err := path.NewPath("/ipns/" + ipns.NameFromPeer(pid).String())
if err != nil {
return nil, fmt.Errorf("cannot create new key: %w", err)
return nil, err
}
return &key{
name: name,
@ -121,37 +121,34 @@ func (api *KeyAPI) List(ctx context.Context) ([]coreiface.Key, error) {
keys, err := api.repo.Keystore().List()
if err != nil {
return nil, fmt.Errorf("cannot list keys in keystore: %w", err)
return nil, err
}
sort.Strings(keys)
out := make([]coreiface.Key, 1, len(keys)+1)
out := make([]coreiface.Key, len(keys)+1)
out[0], err = newKey("self", api.identity)
if err != nil {
return nil, err
}
for _, k := range keys {
for n, k := range keys {
privKey, err := api.repo.Keystore().Get(k)
if err != nil {
log.Errorf("cannot get key from keystore: %s", err)
continue
return nil, err
}
pubKey := privKey.GetPublic()
pid, err := peer.IDFromPublicKey(pubKey)
if err != nil {
log.Errorf("cannot decode public key: %s", err)
continue
return nil, err
}
k, err := newKey(k, pid)
out[n+1], err = newKey(k, pid)
if err != nil {
return nil, err
}
out = append(out, k)
}
return out, nil
}
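
Beyond error wrapping, the master side of this hunk changes List's failure mode: a corrupt keystore entry is logged and skipped rather than aborting the whole listing, so the result is built with append onto a length-1, capacity-len(keys)+1 slice instead of indexed writes. A runnable sketch of that skip-on-error pattern, with decode standing in for the Keystore().Get and peer.IDFromPublicKey steps (all names here are illustrative):

package main

import (
	"fmt"
	"log"
)

// decode stands in for fetching and parsing one keystore entry.
func decode(name string) (string, error) {
	if name == "corrupt" {
		return "", fmt.Errorf("bad key material")
	}
	return "key:" + name, nil
}

func main() {
	names := []string{"alice", "corrupt", "bob"}
	// Length 1 reserves slot 0 for "self"; the capacity avoids regrowth.
	out := make([]string, 1, len(names)+1)
	out[0] = "key:self"
	for _, n := range names {
		k, err := decode(n)
		if err != nil {
			log.Printf("skipping %s: %s", n, err) // degrade, don't fail
			continue
		}
		out = append(out, k)
	}
	fmt.Println(out) // [key:self key:alice key:bob]
}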

View File

@ -37,7 +37,7 @@ func (NodeProvider) MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity
nodes := make([]*core.IpfsNode, n)
apis := make([]coreiface.CoreAPI, n)
for i := range n {
for i := 0; i < n; i++ {
var ident config.Identity
if fullIdentity {
sk, pk, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
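
The only change in this hunk: master uses Go 1.22's range-over-int loop, which the rc1 toolchain floor presumably predates. The two forms iterate identically:

package main

import "fmt"

func main() {
	n := 3
	for i := range n { // Go 1.22+: i takes 0, 1, 2
		fmt.Println("range form:", i)
	}
	for i := 0; i < n; i++ { // pre-1.22 equivalent
		fmt.Println("classic form:", i)
	}
}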

View File

@ -15,7 +15,8 @@ import (
)
func TestPathUnixFSHAMTPartial(t *testing.T) {
ctx := t.Context()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create a node
apis, err := NodeProvider{}.MakeAPISwarm(t, ctx, true, true, 1)
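
t.Context() arrived in Go 1.24: it returns a context that is canceled automatically just before the test's cleanup functions run, replacing the manual WithCancel/defer pair restored on the rc1 side:

package example

import (
	"context"
	"testing"
)

func TestContexts(t *testing.T) {
	// Go 1.24+: canceled for you shortly before Cleanup runs.
	ctx := t.Context()

	// Pre-1.24 equivalent, as used on v0.39.0-rc1.
	legacyCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, _ = ctx, legacyCtx
}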

View File

@ -177,18 +177,12 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
if settings.MaxHAMTFanoutSet {
fileAdder.MaxHAMTFanout = settings.MaxHAMTFanout
}
if settings.SizeEstimationModeSet {
fileAdder.SizeEstimationMode = settings.SizeEstimationMode
}
fileAdder.NoCopy = settings.NoCopy
fileAdder.CidBuilder = prefix
fileAdder.PreserveMode = settings.PreserveMode
fileAdder.PreserveMtime = settings.PreserveMtime
fileAdder.FileMode = settings.Mode
fileAdder.FileMtime = settings.Mtime
if settings.IncludeEmptyDirsSet {
fileAdder.IncludeEmptyDirs = settings.IncludeEmptyDirs
}
switch settings.Layout {
case options.BalancedLayout:

View File

@ -78,23 +78,9 @@ func ListenAndServe(n *core.IpfsNode, listeningMultiAddr string, options ...Serv
return Serve(n, manet.NetListener(list), options...)
}
// Serve accepts incoming HTTP connections on the listener and passes them
// Serve accepts incoming HTTP connections on the listener and pass them
// to ServeOption handlers.
func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error {
return ServeWithReady(node, lis, nil, options...)
}
// ServeWithReady is like Serve but signals on the ready channel when the
// server is about to accept connections. The channel is closed right before
// server.Serve() is called.
//
// This is useful for callers that need to perform actions (like writing
// address files) only after the server is guaranteed to be accepting
// connections, avoiding race conditions where clients see the file before
// the server is ready.
//
// Passing nil for ready is equivalent to calling Serve().
func ServeWithReady(node *core.IpfsNode, lis net.Listener, ready chan<- struct{}, options ...ServeOption) error {
// make sure we close this no matter what.
defer lis.Close()
@ -121,9 +107,6 @@ func ServeWithReady(node *core.IpfsNode, lis net.Listener, ready chan<- struct{}
var serverError error
serverClosed := make(chan struct{})
go func() {
if ready != nil {
close(ready)
}
serverError = server.Serve(lis)
close(serverClosed)
}()
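
A sketch of the caller pattern the ServeWithReady doc comment describes; only ServeWithReady's signature comes from the code above, everything else (function name, file path handling) is hypothetical. The address file is written only after ready closes, so a client reading the file cannot beat server.Serve:

package demo

import (
	"net"
	"os"

	core "github.com/ipfs/kubo/core"
	corehttp "github.com/ipfs/kubo/core/corehttp"
)

// serveAndAnnounce is a hypothetical caller of ServeWithReady.
func serveAndAnnounce(node *core.IpfsNode, lis net.Listener, apiFile string) error {
	ready := make(chan struct{})
	errCh := make(chan error, 1)
	go func() {
		errCh <- corehttp.ServeWithReady(node, lis, ready)
	}()
	select {
	case <-ready:
		// The server is entering Serve; announcing the address now
		// cannot race a not-yet-accepting server.
		if err := os.WriteFile(apiFile, []byte(lis.Addr().String()), 0o600); err != nil {
			return err
		}
		return <-errCh
	case err := <-errCh:
		// Serve failed before signalling readiness.
		return err
	}
}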

View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"net"
"net/http"
"time"
@ -113,7 +112,6 @@ func Libp2pGatewayOption() ServeOption {
Menu: nil,
// Apply timeout and concurrency limits from user config
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxRequestDuration: cfg.Gateway.MaxRequestDuration.WithDefault(config.DefaultMaxRequestDuration),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))),
DiagnosticServiceURL: "", // Not used since DisableHTMLErrors=true
@ -270,19 +268,19 @@ func getGatewayConfig(n *core.IpfsNode) (gateway.Config, map[string][]string, er
// Initialize gateway configuration, with empty PublicGateways, handled after.
gwCfg := gateway.Config{
DeserializedResponses: cfg.Gateway.DeserializedResponses.WithDefault(config.DefaultDeserializedResponses),
AllowCodecConversion: cfg.Gateway.AllowCodecConversion.WithDefault(config.DefaultAllowCodecConversion),
DisableHTMLErrors: cfg.Gateway.DisableHTMLErrors.WithDefault(config.DefaultDisableHTMLErrors),
NoDNSLink: cfg.Gateway.NoDNSLink,
PublicGateways: map[string]*gateway.PublicGateway{},
RetrievalTimeout: cfg.Gateway.RetrievalTimeout.WithDefault(config.DefaultRetrievalTimeout),
MaxRequestDuration: cfg.Gateway.MaxRequestDuration.WithDefault(config.DefaultMaxRequestDuration),
MaxConcurrentRequests: int(cfg.Gateway.MaxConcurrentRequests.WithDefault(int64(config.DefaultMaxConcurrentRequests))),
MaxRangeRequestFileSize: int64(cfg.Gateway.MaxRangeRequestFileSize.WithDefault(uint64(config.DefaultMaxRangeRequestFileSize))),
DiagnosticServiceURL: cfg.Gateway.DiagnosticServiceURL.WithDefault(config.DefaultDiagnosticServiceURL),
}
// Add default implicit known gateways, such as subdomain gateway on localhost.
maps.Copy(gwCfg.PublicGateways, defaultKnownGateways)
for hostname, gw := range defaultKnownGateways {
gwCfg.PublicGateways[hostname] = gw
}
// Apply values from cfg.Gateway.PublicGateways if they exist.
for hostname, gw := range cfg.Gateway.PublicGateways {

View File

@ -19,7 +19,7 @@ func TestPeersTotal(t *testing.T) {
ctx := context.Background()
hosts := make([]*bhost.BasicHost, 4)
for i := range 4 {
for i := 0; i < 4; i++ {
var err error
hosts[i], err = bhost.NewHost(swarmt.GenSwarm(t), nil)
if err != nil {

View File

@ -35,13 +35,8 @@ func P2PProxyOption() ServeOption {
}
rt := p2phttp.NewTransport(ipfsNode.PeerHost, p2phttp.ProtocolOption(parsedRequest.name))
proxy := &httputil.ReverseProxy{
Transport: rt,
Rewrite: func(r *httputil.ProxyRequest) {
r.SetURL(target)
r.SetXForwarded()
},
}
proxy := httputil.NewSingleHostReverseProxy(target)
proxy.Transport = rt
proxy.ServeHTTP(w, request)
})
return mux, nil
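
master's form is the Rewrite hook added to httputil.ReverseProxy in Go 1.20. Unlike the Director set up by NewSingleHostReverseProxy, a Rewrite-based proxy does not append X-Forwarded-For implicitly, so SetXForwarded sets the X-Forwarded-For/Host/Proto headers explicitly. A standalone sketch (the backend and listen addresses are made up):

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, err := url.Parse("http://127.0.0.1:8080") // assumed backend
	if err != nil {
		log.Fatal(err)
	}
	proxy := &httputil.ReverseProxy{
		Rewrite: func(r *httputil.ProxyRequest) {
			r.SetURL(target)  // route to target, like NewSingleHostReverseProxy
			r.SetXForwarded() // set X-Forwarded-* headers explicitly
		},
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", proxy))
}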

View File

@ -2,8 +2,6 @@ package corehttp
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"time"
@ -15,9 +13,6 @@ import (
"github.com/ipfs/boxo/routing/http/types/iter"
cid "github.com/ipfs/go-cid"
core "github.com/ipfs/kubo/core"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/dual"
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/routing"
)
@ -101,60 +96,6 @@ func (r *contentRouter) PutIPNS(ctx context.Context, name ipns.Name, record *ipn
return r.n.Routing.PutValue(ctx, string(name.RoutingKey()), raw)
}
func (r *contentRouter) GetClosestPeers(ctx context.Context, key cid.Cid) (iter.ResultIter[*types.PeerRecord], error) {
// GetClosestPeers requires a key; reject undefined CIDs outright.
if key == cid.Undef {
return nil, errors.New("GetClosestPeers key is undefined")
}
keyStr := string(key.Hash())
var peers []peer.ID
var err error
if r.n.DHTClient == nil {
return nil, fmt.Errorf("GetClosestPeers not supported: DHT is not available")
}
switch dhtClient := r.n.DHTClient.(type) {
case *dual.DHT:
// Only use WAN DHT for public HTTP Routing API.
// LAN DHT contains private network peers that should not be exposed publicly.
if dhtClient.WAN == nil {
return nil, fmt.Errorf("GetClosestPeers not supported: WAN DHT is not available")
}
peers, err = dhtClient.WAN.GetClosestPeers(ctx, keyStr)
case *fullrt.FullRT:
peers, err = dhtClient.GetClosestPeers(ctx, keyStr)
case *dht.IpfsDHT:
peers, err = dhtClient.GetClosestPeers(ctx, keyStr)
default:
return nil, fmt.Errorf("GetClosestPeers not supported for DHT type %T", r.n.DHTClient)
}
if err != nil {
return nil, err
}
// We have some DHT-closest peers. Find addresses for them.
// The addresses should be in the peerstore.
records := make([]*types.PeerRecord, 0, len(peers))
for _, p := range peers {
addrs := r.n.Peerstore.Addrs(p)
rAddrs := make([]types.Multiaddr, len(addrs))
for i, addr := range addrs {
rAddrs[i] = types.Multiaddr{Multiaddr: addr}
}
record := types.PeerRecord{
ID: &p,
Schema: types.SchemaPeer,
Addrs: rAddrs,
}
records = append(records, &record)
}
return iter.ToResultIter(iter.FromSlice(records)), nil
}
type peerChanIter struct {
ch <-chan peer.AddrInfo
cancel context.CancelFunc

Some files were not shown because too many files have changed in this diff.