mirror of https://github.com/ipfs/kubo.git (synced 2026-02-21 18:37:45 +08:00)

Compare commits: 153 commits
Commits in this comparison (SHA1):

c5776476bc 2bb2c1c7b7 3ba73501fe 1c6f924c78 4e7ffbd897 8eab2fcf5d
0ec69bc019 5a05621157 1f7bbcbf6a 6a008fc74c 36c29c55f0 9ca1dbb894
d405dfd1ae 0fed2c35c3 c6702eaf88 f57d13c2c2 3a6b1ee122 4c6dab73cf
67c89bbd7e ff4bb10989 df9574e090 ea8a1ced79 dd882c0bdb 22ca729eb3
3522136c4f 07ad431b00 77ed3dd0ef 59b5d6ab4d 9539b4d8b8 7de7af0820
ef99e0a0f7 b57278017a a137401272 5ccdcdd4fc f6a5c5f5c5 bc9b388610
4a80af4b54 b7a35a4bd0 6d253a6b80 1128d81042 8848b537cf c1fd4d70f5
824a47ae11 edb7056747 698354342e 4bdc9ad220 6a595c27a9 39c609b3db
ec973aeb38 56bf782cc6 447109df64 de20a78a1f 07ea37e99f ac9ae9bf5d
25ebab9dae 5288946fd1 3e85793b58 23ba660ef0 aa3c88dcdd 1301710a91
55b94751cc 584025bb69 663a4a78fa 9b99dc6f45 566f8ba63f f4d6253ae2
3d0e7c8465 828526e515 edc2cadc85 117d8d67e5 91d392d9ae ab44726177
d29c0b9c01 c1e1cfebbb af2e7e1953 3f2cc50eb8 b5078b005d f7db0c4fc1
823c11721d 2896aed9f4 31ea50efbf 0aa7f10037 e0e6cacc49 40ffd166ea
179e1f8629 c38c780405 78a2f2cf24 16cf61d6e7 1776cf4bab 21f50ac931
72f4c6f029 2844a913d3 2cbf151085 0453679e7b 1141220674 7e66fe9aac
73ab037d1d d88267018e 030d64f8ba 597f2b827d 1404861086 c7eda21d68
798b889ba2 35d26e143f cec7432043 d56fe3a026 d45c615e73 702c63b6db
93f8897d7c 0954d249c2 149ca2fd3b f067a86170 be7bf60355 ae86672964
e05357ed19 cf4b1f41db 044db8253b a4323abc10 c2bf0f9515 2e9c4ec500
7d0534b76c 513ee699ab fee76b66c9 8c5f302d25 ae78c7821c 5d4c40e1f1
886ac22005 16479ec692 f9dc739933 241b723534 c04781c7f7 40c027aa74
ff6f2be4fe f4834e797d 2b5adeedcc ff6baeec16 95b0348f34 35ba5091a5
cf8194a8d1 42a4935abf 9370004b5f a7ce33c722 a86df5feef 020827d302
13fbb76de4 1dffcb892f 776c21a6d6 a688b7eeac f63887ae96 1107ac42af
1e9b6fb27e 46d438f685 22f03772ee

.github/dependabot.yml (39 changed lines)

@@ -1,6 +1,45 @@
+# Dependabot PRs are auto-tidied by .github/workflows/dependabot-tidy.yml
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
       interval: "weekly"
+
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+    open-pull-requests-limit: 10
+    labels:
+      - "dependencies"
+    ignore:
+      # Updated via go-ds-* wrappers in ipfs-ecosystem group
+      - dependency-name: "github.com/cockroachdb/pebble*"
+      - dependency-name: "github.com/syndtr/goleveldb"
+      - dependency-name: "github.com/dgraph-io/badger*"
+    groups:
+      ipfs-ecosystem:
+        patterns:
+          - "github.com/ipfs/*"
+          - "github.com/ipfs-shipyard/*"
+          - "github.com/ipshipyard/*"
+          - "github.com/multiformats/*"
+          - "github.com/ipld/*"
+      libp2p-ecosystem:
+        patterns:
+          - "github.com/libp2p/*"
+      golang-x:
+        patterns:
+          - "golang.org/x/*"
+      opentelemetry:
+        patterns:
+          - "go.opentelemetry.io/*"
+      prometheus:
+        patterns:
+          - "github.com/prometheus/*"
+          - "contrib.go.opencensus.io/*"
+          - "go.opencensus.io"
+      uber:
+        patterns:
+          - "go.uber.org/*"

.github/workflows/codeql-analysis.yml (8 changed lines)

@@ -29,7 +29,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6

      - name: Setup Go
        uses: actions/setup-go@v6
@@ -38,12 +38,12 @@ jobs:

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
-       uses: github/codeql-action/init@v3
+       uses: github/codeql-action/init@v4
        with:
          languages: go

      - name: Autobuild
-       uses: github/codeql-action/autobuild@v3
+       uses: github/codeql-action/autobuild@v4

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@v3
+       uses: github/codeql-action/analyze@v4

.github/workflows/dependabot-tidy.yml (new file, 61 lines)

@@ -0,0 +1,61 @@
# Dependabot only updates go.mod/go.sum in the root module, but this repo has
# multiple Go modules (see docs/examples/). This workflow runs `make mod_tidy`
# on Dependabot PRs to keep all go.sum files in sync, preventing go-check CI
# failures.
name: Dependabot Tidy

on:
  pull_request_target:
    types: [opened, synchronize]
  workflow_dispatch:
    inputs:
      pr_number:
        description: 'PR number to run mod_tidy on'
        required: true
        type: number

permissions:
  contents: write
  pull-requests: write

jobs:
  tidy:
    if: github.actor == 'dependabot[bot]' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest
    steps:
      - name: Get PR info
        id: pr
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            pr_number="${{ inputs.pr_number }}"
          else
            pr_number="${{ github.event.pull_request.number }}"
          fi
          echo "number=$pr_number" >> $GITHUB_OUTPUT
          branch=$(gh pr view "$pr_number" --repo "${{ github.repository }}" --json headRefName -q '.headRefName')
          echo "branch=$branch" >> $GITHUB_OUTPUT
      - uses: actions/checkout@v6
        with:
          ref: ${{ steps.pr.outputs.branch }}
          token: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      - name: Run make mod_tidy
        run: make mod_tidy
      - name: Check for changes
        id: git-check
        run: |
          if [[ -n $(git status --porcelain) ]]; then
            echo "modified=true" >> $GITHUB_OUTPUT
          fi
      - name: Commit changes
        if: steps.git-check.outputs.modified == 'true'
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git add -A
          git commit -m "chore: run make mod_tidy"
          git push
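
The workflow above relies on a `mod_tidy` make target; the Makefile that defines it is not part of this diff. As a rough, hypothetical equivalent of what such a target does in a repo with nested Go modules:

```bash
# Hypothetical stand-in for `make mod_tidy` (the real target lives in the repo's
# Makefiles, not shown here): tidy the root module and every nested module.
find . -name go.mod -not -path './vendor/*' -execdir go mod tidy \;
```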

.github/workflows/docker-check.yml (4 changed lines)

@@ -22,7 +22,7 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6
      - uses: hadolint/hadolint-action@v3.3.0
        with:
          dockerfile: Dockerfile
@@ -41,7 +41,7 @@ jobs:
      run:
        shell: bash
    steps:
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

.github/workflows/docker-image.yml (5 changed lines)

@@ -39,10 +39,11 @@ jobs:
    timeout-minutes: 15
    env:
      IMAGE_NAME: ipfs/kubo
-     LEGACY_IMAGE_NAME: ipfs/go-ipfs
+   outputs:
+     tags: ${{ steps.tags.outputs.value }}
    steps:
      - name: Check out the repo
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

.github/workflows/gateway-conformance.yml (20 changed lines)

@@ -41,13 +41,13 @@ jobs:
    steps:
      # 1. Download the gateway-conformance fixtures
      - name: Download gateway-conformance fixtures
-       uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8
+       uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.10
        with:
          output: fixtures

      # 2. Build the kubo-gateway
      - name: Checkout kubo-gateway
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          path: kubo-gateway
      - name: Setup Go
@@ -93,7 +93,7 @@ jobs:

      # 6. Run the gateway-conformance tests
      - name: Run gateway-conformance tests
-       uses: ipfs/gateway-conformance/.github/actions/test@v0.8
+       uses: ipfs/gateway-conformance/.github/actions/test@v0.10
        with:
          gateway-url: http://127.0.0.1:8080
          subdomain-url: http://localhost:8080
@@ -109,13 +109,13 @@ jobs:
        run: cat output.md >> $GITHUB_STEP_SUMMARY
      - name: Upload HTML report
        if: failure() || success()
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance.html
          path: output.html
      - name: Upload JSON report
        if: failure() || success()
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance.json
          path: output.json
@@ -127,13 +127,13 @@ jobs:
    steps:
      # 1. Download the gateway-conformance fixtures
      - name: Download gateway-conformance fixtures
-       uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.8
+       uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.10
        with:
          output: fixtures

      # 2. Build the kubo-gateway
      - name: Checkout kubo-gateway
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          path: kubo-gateway
      - name: Setup Go
@@ -199,7 +199,7 @@ jobs:

      # 9. Run the gateway-conformance tests over libp2p
      - name: Run gateway-conformance tests over libp2p
-       uses: ipfs/gateway-conformance/.github/actions/test@v0.8
+       uses: ipfs/gateway-conformance/.github/actions/test@v0.10
        with:
          gateway-url: http://127.0.0.1:8092
          args: --specs "trustless-gateway,-trustless-ipns-gateway" -skip 'TestGatewayCar/GET_response_for_application/vnd.ipld.car/Header_Content-Length'
@@ -214,13 +214,13 @@ jobs:
        run: cat output.md >> $GITHUB_STEP_SUMMARY
      - name: Upload HTML report
        if: failure() || success()
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance-libp2p.html
          path: output.html
      - name: Upload JSON report
        if: failure() || success()
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: gateway-conformance-libp2p.json
          path: output.json

.github/workflows/gobuild.yml (2 changed lines)

@@ -27,7 +27,7 @@ jobs:
      run:
        shell: bash
    steps:
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6
      - uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

.github/workflows/golang-analysis.yml (11 changed lines)

@@ -22,7 +22,7 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    steps:
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6
        with:
          submodules: recursive
      - uses: actions/setup-go@v6
@@ -47,6 +47,15 @@ jobs:
            echo "$out"
            exit 1
          fi
+     - name: go fix
+       if: always() # run this step even if the previous one failed
+       run: |
+         go fix ./...
+         if [[ -n $(git diff --name-only) ]]; then
+           echo "go fix produced changes. Run 'go fix ./...' locally and commit the result."
+           git diff
+           exit 1
+         fi
      - name: go vet
        if: always() # run this step even if the previous one failed
        uses: protocol/multiple-go-modules@v1.4

.github/workflows/golint.yml (2 changed lines)

@@ -28,7 +28,7 @@ jobs:
      run:
        shell: bash
    steps:
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6
      - uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

.github/workflows/gotest.yml (132 changed lines)

@@ -14,11 +14,13 @@ concurrency:
  cancel-in-progress: true

jobs:
- go-test:
+ # Unit tests with coverage collection (uploaded to Codecov)
+ unit-tests:
    if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
    runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
-   timeout-minutes: 20
+   timeout-minutes: 15
    env:
      GOTRACEBACK: single # reduce noise on test timeout panics
      TEST_DOCKER: 0
      TEST_FUSE: 0
      TEST_VERBOSE: 1
@@ -29,48 +31,25 @@ jobs:
        shell: bash
    steps:
      - name: Check out Kubo
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'
      - name: Install missing tools
        run: sudo apt update && sudo apt install -y zsh
-     - name: 👉️ If this step failed, go to «Summary» (top left) → inspect the «Failures/Errors» table
-       env:
-         # increasing parallelism beyond 2 doesn't speed up the tests much
-         PARALLEL: 2
+     - name: Run unit tests
        run: |
-         make -j "$PARALLEL" test/unit/gotest.junit.xml &&
+         make test_unit &&
          [[ ! $(jq -s -c 'map(select(.Action == "fail")) | .[]' test/unit/gotest.json) ]]
      - name: Upload coverage to Codecov
-       uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
+       uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
        if: failure() || success()
        with:
          name: unittests
          files: coverage/unit_tests.coverprofile
-     - name: Test kubo-as-a-library example
-       run: |
-         # we want to first test with the kubo version in the go.mod file
-         go test -v ./...
-
-         # we also want to test the examples against the current version of kubo
-         # however, that version might be in a fork so we need to replace the dependency
-
-         # backup the go.mod and go.sum files to restore them after we run the tests
-         cp go.mod go.mod.bak
-         cp go.sum go.sum.bak
-
-         # make sure the examples run against the current version of kubo
-         go mod edit -replace github.com/ipfs/kubo=./../../..
-         go mod tidy
-
-         go test -v ./...
-
-         # restore the go.mod and go.sum files to their original state
-         mv go.mod.bak go.mod
-         mv go.sum.bak go.sum
-       working-directory: docs/examples/kubo-as-a-library
+         token: ${{ secrets.CODECOV_TOKEN }}
+         fail_ci_if_error: false
      - name: Create a proper JUnit XML report
        uses: ipdxco/gotest-json-to-junit-xml@v1
        with:
@@ -78,9 +57,9 @@ jobs:
          output: test/unit/gotest.junit.xml
        if: failure() || success()
      - name: Archive the JUnit XML report
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
-         name: unit
+         name: unit-tests-junit
          path: test/unit/gotest.junit.xml
        if: failure() || success()
      - name: Create a HTML report
@@ -91,9 +70,9 @@ jobs:
          output: test/unit/gotest.html
        if: failure() || success()
      - name: Archive the HTML report
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
-         name: html
+         name: unit-tests-html
          path: test/unit/gotest.html
        if: failure() || success()
      - name: Create a Markdown report
@@ -106,3 +85,86 @@ jobs:
      - name: Set the summary
        run: cat test/unit/gotest.md >> $GITHUB_STEP_SUMMARY
        if: failure() || success()
+
+ # End-to-end integration/regression tests from test/cli
+ # (Go-based replacement for legacy test/sharness shell scripts)
+ cli-tests:
+   if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+   runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
+   timeout-minutes: 15
+   env:
+     GOTRACEBACK: single # reduce noise on test timeout panics
+     TEST_VERBOSE: 1
+     GIT_PAGER: cat
+     IPFS_CHECK_RCMGR_DEFAULTS: 1
+   defaults:
+     run:
+       shell: bash
+   steps:
+     - name: Check out Kubo
+       uses: actions/checkout@v6
+     - name: Set up Go
+       uses: actions/setup-go@v6
+       with:
+         go-version-file: 'go.mod'
+     - name: Install missing tools
+       run: sudo apt update && sudo apt install -y zsh
+     - name: Run CLI tests
+       env:
+         IPFS_PATH: ${{ runner.temp }}/ipfs-test
+       run: make test_cli
+     - name: Create JUnit XML report
+       uses: ipdxco/gotest-json-to-junit-xml@v1
+       with:
+         input: test/cli/cli-tests.json
+         output: test/cli/cli-tests.junit.xml
+       if: failure() || success()
+     - name: Archive JUnit XML report
+       uses: actions/upload-artifact@v6
+       with:
+         name: cli-tests-junit
+         path: test/cli/cli-tests.junit.xml
+       if: failure() || success()
+     - name: Create HTML report
+       uses: ipdxco/junit-xml-to-html@v1
+       with:
+         mode: no-frames
+         input: test/cli/cli-tests.junit.xml
+         output: test/cli/cli-tests.html
+       if: failure() || success()
+     - name: Archive HTML report
+       uses: actions/upload-artifact@v6
+       with:
+         name: cli-tests-html
+         path: test/cli/cli-tests.html
+       if: failure() || success()
+     - name: Create Markdown report
+       uses: ipdxco/junit-xml-to-html@v1
+       with:
+         mode: summary
+         input: test/cli/cli-tests.junit.xml
+         output: test/cli/cli-tests.md
+       if: failure() || success()
+     - name: Set summary
+       run: cat test/cli/cli-tests.md >> $GITHUB_STEP_SUMMARY
+       if: failure() || success()
+
+ # Example tests (kubo-as-a-library)
+ example-tests:
+   if: github.repository == 'ipfs/kubo' || github.event_name == 'workflow_dispatch'
+   runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
+   timeout-minutes: 5
+   env:
+     GOTRACEBACK: single
+   defaults:
+     run:
+       shell: bash
+   steps:
+     - name: Check out Kubo
+       uses: actions/checkout@v6
+     - name: Set up Go
+       uses: actions/setup-go@v6
+       with:
+         go-version-file: 'go.mod'
+     - name: Run example tests
+       run: make test_examples
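
For a local run that loosely mirrors the three jobs above, the make targets the workflow calls (`test_unit`, `test_cli`, `test_examples`) can be invoked directly; exact behavior depends on the Makefile, which is not part of this diff.

```bash
# Sketch of running the same test groups locally (assumes Go and GNU make).
make test_unit               # unit tests with coverage, as in the unit-tests job
make build && make test_cli  # CLI integration tests need a freshly built binary
make test_examples           # kubo-as-a-library example tests
```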

.github/workflows/interop.yml (144 changed lines)

@@ -1,3 +1,17 @@
+# Interoperability Tests
+#
+# This workflow ensures Kubo remains compatible with the broader IPFS ecosystem.
+# It builds Kubo from source, then runs:
+#
+# 1. helia-interop: Tests compatibility with Helia (JavaScript IPFS implementation)
+#    using Playwright-based tests from @helia/interop package.
+#
+# 2. ipfs-webui: Runs E2E tests from ipfs/ipfs-webui repository to verify
+#    the web interface works correctly with the locally built Kubo binary.
+#
+# Both jobs use caching to speed up repeated runs (npm dependencies, Playwright
+# browsers, and webui build artifacts).
+
name: Interop

on:
@@ -32,12 +46,12 @@ jobs:
      run:
        shell: bash
    steps:
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6
      - uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'
      - run: make build
-     - uses: actions/upload-artifact@v4
+     - uses: actions/upload-artifact@v6
        with:
          name: kubo
          path: cmd/ipfs/ipfs
@@ -49,26 +63,50 @@ jobs:
      run:
        shell: bash
    steps:
-     - uses: actions/setup-node@v5
+     - uses: actions/setup-node@v6
        with:
          node-version: lts/*
-     - uses: actions/download-artifact@v5
+     - uses: actions/download-artifact@v7
        with:
          name: kubo
          path: cmd/ipfs
      - run: chmod +x cmd/ipfs/ipfs
-     - run: echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
-       id: npm-cache-dir
-     - uses: actions/cache@v4
-       with:
-         path: ${{ steps.npm-cache-dir.outputs.dir }}
-         key: ${{ runner.os }}-${{ github.job }}-helia-${{ hashFiles('**/package-lock.json') }}
-         restore-keys: ${{ runner.os }}-${{ github.job }}-helia-
-     - run: sudo apt update
-     - run: sudo apt install -y libxkbcommon0 libxdamage1 libgbm1 libpango-1.0-0 libcairo2 # dependencies for playwright
-     - run: npx --package @helia/interop helia-interop
+     # Cache node_modules based on latest @helia/interop version from npm registry.
+     # This ensures we always test against the latest release while still benefiting
+     # from caching when the version hasn't changed.
+     - name: Get latest @helia/interop version
+       id: helia-version
+       run: echo "version=$(npm view @helia/interop version)" >> $GITHUB_OUTPUT
+     - name: Cache helia-interop node_modules
+       uses: actions/cache@v5
+       id: helia-cache
+       with:
+         path: node_modules
+         key: ${{ runner.os }}-helia-interop-${{ steps.helia-version.outputs.version }}
+     - name: Install @helia/interop
+       if: steps.helia-cache.outputs.cache-hit != 'true'
+       run: npm install @helia/interop
+     # TODO(IPIP-499): Remove --grep --invert workaround once helia implements IPIP-499
+     # Tracking issue: https://github.com/ipfs/helia/issues/941
+     #
+     # PROVISIONAL HACK: Skip '@helia/mfs - should have the same CID after
+     # creating a file' test due to IPIP-499 changes in kubo.
+     #
+     # WHY IT FAILS: The test creates a 5-byte file in MFS on both kubo and helia,
+     # then compares the root directory CID. With kubo PR #11148, `ipfs files write`
+     # now produces raw CIDs for single-block files (matching `ipfs add --raw-leaves`),
+     # while helia uses `reduceSingleLeafToSelf: false` which keeps the dag-pb wrapper.
+     # Different file CIDs lead to different directory CIDs.
+     #
+     # We run aegir directly (instead of helia-interop binary) because only aegir
+     # supports the --grep/--invert flags needed to exclude specific tests.
+     - name: Run helia-interop tests (excluding IPIP-499 incompatible test)
+       run: npx aegir test -t node --bail -- --grep 'should have the same CID after creating a file' --invert
+       env:
+         KUBO_BINARY: ${{ github.workspace }}/cmd/ipfs/ipfs
+       working-directory: node_modules/@helia/interop
  ipfs-webui:
    needs: [interop-prep]
    runs-on: ${{ fromJSON(github.repository == 'ipfs/kubo' && '["self-hosted", "linux", "x64", "2xlarge"]' || '"ubuntu-latest"') }}
@@ -84,48 +122,82 @@ jobs:
      run:
        shell: bash
    steps:
-     - uses: actions/setup-node@v5
-       with:
-         node-version: 20.x
-     - uses: actions/download-artifact@v5
+     - uses: actions/download-artifact@v7
        with:
          name: kubo
          path: cmd/ipfs
      - run: chmod +x cmd/ipfs/ipfs
-     - uses: actions/checkout@v5
+     - uses: actions/checkout@v6
        with:
          repository: ipfs/ipfs-webui
          path: ipfs-webui
-     - run: |
-         echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
-       id: npm-cache-dir
-     - uses: actions/cache@v4
-       with:
-         path: ${{ steps.npm-cache-dir.outputs.dir }}
-         key: ${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/package-lock.json') }}
-         restore-keys: |
-           ${{ runner.os }}-${{ github.job }}-
-     - env:
-         NPM_CACHE_DIR: ${{ steps.npm-cache-dir.outputs.dir }}
-       run: |
-         npm ci --prefer-offline --no-audit --progress=false --cache "$NPM_CACHE_DIR"
-         npx playwright install --with-deps
-       working-directory: ipfs-webui
-     - id: ref
+     - uses: actions/setup-node@v6
+       with:
+         node-version-file: 'ipfs-webui/.tool-versions'
+     - id: webui-ref
        run: echo "ref=$(git rev-parse --short HEAD)" | tee -a $GITHUB_OUTPUT
        working-directory: ipfs-webui
-     - id: state
+     - id: webui-state
        env:
          GITHUB_TOKEN: ${{ github.token }}
-         ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.ref.outputs.ref }}/status
+         ENDPOINT: repos/ipfs/ipfs-webui/commits/${{ steps.webui-ref.outputs.ref }}/status
          SELECTOR: .state
          KEY: state
        run: gh api "$ENDPOINT" --jq "$SELECTOR" | xargs -I{} echo "$KEY={}" | tee -a $GITHUB_OUTPUT
-     - name: Build ipfs-webui@main (state=${{ steps.state.outputs.state }})
+     # Cache node_modules based on package-lock.json
+     - name: Cache node_modules
+       uses: actions/cache@v5
+       id: node-modules-cache
+       with:
+         path: ipfs-webui/node_modules
+         key: ${{ runner.os }}-webui-node-modules-${{ hashFiles('ipfs-webui/package-lock.json') }}
+         restore-keys: |
+           ${{ runner.os }}-webui-node-modules-
+     - name: Install dependencies
+       if: steps.node-modules-cache.outputs.cache-hit != 'true'
+       run: npm ci --prefer-offline --no-audit --progress=false
+       working-directory: ipfs-webui
+     # Cache Playwright browsers
+     - name: Cache Playwright browsers
+       uses: actions/cache@v5
+       id: playwright-cache
+       with:
+         path: ~/.cache/ms-playwright
+         key: ${{ runner.os }}-playwright-${{ hashFiles('ipfs-webui/package-lock.json') }}
+         restore-keys: |
+           ${{ runner.os }}-playwright-
+     # On cache miss: download browsers and install OS dependencies
+     - name: Install Playwright with dependencies
+       if: steps.playwright-cache.outputs.cache-hit != 'true'
+       run: npx playwright install --with-deps
+       working-directory: ipfs-webui
+     # On cache hit: only ensure OS dependencies are present (fast, idempotent)
+     - name: Install Playwright OS dependencies
+       if: steps.playwright-cache.outputs.cache-hit == 'true'
+       run: npx playwright install-deps
+       working-directory: ipfs-webui
+     # Cache test build output
+     - name: Cache test build
+       uses: actions/cache@v5
+       id: test-build-cache
+       with:
+         path: ipfs-webui/build
+         key: ${{ runner.os }}-webui-build-${{ hashFiles('ipfs-webui/package-lock.json', 'ipfs-webui/src/**', 'ipfs-webui/public/**') }}
+         restore-keys: |
+           ${{ runner.os }}-webui-build-
+     - name: Build ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }})
+       if: steps.test-build-cache.outputs.cache-hit != 'true'
        run: npm run test:build
        working-directory: ipfs-webui
-     - name: Test ipfs-webui@main (state=${{ steps.state.outputs.state }}) E2E against the locally built Kubo binary
+     - name: Test ipfs-webui@${{ steps.webui-ref.outputs.ref }} (state=${{ steps.webui-state.outputs.state }}) E2E against the locally built Kubo binary
        run: npm run test:e2e
        env:
          IPFS_GO_EXEC: ${{ github.workspace }}/cmd/ipfs/ipfs
        working-directory: ipfs-webui
+     - name: Upload test artifacts on failure
+       if: failure()
+       uses: actions/upload-artifact@v6
+       with:
+         name: webui-test-results
+         path: ipfs-webui/test-results/
+         retention-days: 7
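
A local reproduction of the helia-interop job would follow the same steps the workflow runs; the sketch below simply mirrors those steps and assumes Node.js, npm, and GNU make are installed.

```bash
# Mirrors the helia-interop job above: build Kubo, install @helia/interop,
# then run aegir with the IPIP-499 workaround grep/invert filter.
make build
export KUBO_BINARY="$PWD/cmd/ipfs/ipfs"
npm install @helia/interop
cd node_modules/@helia/interop
npx aegir test -t node --bail -- --grep 'should have the same CID after creating a file' --invert
```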

.github/workflows/sharness.yml (12 changed lines)

@@ -23,7 +23,7 @@ jobs:
        shell: bash
    steps:
      - name: Checkout Kubo
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          path: kubo
      - name: Setup Go
@@ -32,7 +32,7 @@
          go-version-file: 'kubo/go.mod'
      - name: Install missing tools
        run: sudo apt update && sudo apt install -y socat net-tools fish libxml2-utils
-     - uses: actions/cache@v4
+     - uses: actions/cache@v5
        with:
          path: test/sharness/lib/dependencies
          key: ${{ runner.os }}-test-generate-junit-html-${{ hashFiles('test/sharness/lib/test-generate-junit-html.sh') }}
@@ -55,11 +55,13 @@
          # increasing parallelism beyond 10 doesn't speed up the tests much
          PARALLEL: ${{ github.repository == 'ipfs/kubo' && 10 || 3 }}
      - name: Upload coverage report
-       uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
+       uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
        if: failure() || success()
        with:
          name: sharness
          files: kubo/coverage/sharness_tests.coverprofile
+         token: ${{ secrets.CODECOV_TOKEN }}
+         fail_ci_if_error: false
      - name: Aggregate results
        run: find kubo/test/sharness/test-results -name 't*-*.sh.*.counts' | kubo/test/sharness/lib/sharness/aggregate-results.sh > kubo/test/sharness/test-results/summary.txt
      - name: 👉️ If this step failed, go to «Summary» (top left) → «HTML Report» → inspect the «Failures» column
@@ -88,7 +90,7 @@
          destination: sharness.html
      - name: Upload one-page HTML report
        if: github.repository != 'ipfs/kubo' && (failure() || success())
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: sharness.html
          path: kubo/test/sharness/test-results/sharness.html
@@ -108,7 +110,7 @@
          destination: sharness-html/
      - name: Upload full HTML report
        if: github.repository != 'ipfs/kubo' && (failure() || success())
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: sharness-html
          path: kubo/test/sharness/test-results/sharness-html

.github/workflows/sync-release-assets.yml (2 changed lines)

@@ -22,7 +22,7 @@ jobs:
      - uses: ipfs/start-ipfs-daemon-action@v1
        with:
          args: --init --init-profile=flatfs,server --enable-gc=false
-     - uses: actions/setup-node@v5
+     - uses: actions/setup-node@v6
        with:
          node-version: 14
      - name: Sync the latest 5 github releases

.github/workflows/test-migrations.yml (4 changed lines)

@@ -41,7 +41,7 @@ jobs:
        shell: bash
    steps:
      - name: Check out Kubo
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
@@ -77,7 +77,7 @@

      - name: Upload test results
        if: always()
-       uses: actions/upload-artifact@v4
+       uses: actions/upload-artifact@v6
        with:
          name: ${{ matrix.os }}-test-results
          path: |

.gitignore (5 changed lines)

@@ -28,6 +28,11 @@ go-ipfs-source.tar.gz
docs/examples/go-ipfs-as-a-library/example-folder/Qm*
/test/sharness/t0054-dag-car-import-export-data/*.car
+
+# test artifacts from make test_unit / test_cli
+/test/unit/gotest.json
+/test/unit/gotest.junit.xml
+/test/cli/cli-tests.json

# ignore build output from snapcraft
/ipfs_*.snap
/parts

AGENTS.md (new file, 218 lines)

@@ -0,0 +1,218 @@
# AI Agent Instructions for Kubo

This file provides instructions for AI coding agents working on the [Kubo](https://github.com/ipfs/kubo) codebase (the Go implementation of IPFS). Follow the [Developer Guide](docs/developer-guide.md) for full details.

## Quick Reference

| Task | Command |
|-------------------|----------------------------------------------------------|
| Tidy deps | `make mod_tidy` (run first if `go.mod` changed) |
| Build | `make build` |
| Unit tests | `go test ./... -run TestName -v` |
| Integration tests | `make build && go test ./test/cli/... -run TestName -v` |
| Lint | `make -O test_go_lint` |
| Format | `go fmt ./...` |

## Project Overview

Kubo is the reference implementation of IPFS in Go. Most IPFS protocol logic lives in [boxo](https://github.com/ipfs/boxo) (the IPFS SDK); kubo wires it together and exposes it via CLI and HTTP RPC API. If a change belongs in the protocol layer, it likely belongs in boxo, not here.

Key directories:

| Directory | Purpose |
|--------------------|----------------------------------------------------------|
| `cmd/ipfs/` | CLI entry point and binary |
| `core/` | core IPFS node implementation |
| `core/commands/` | CLI command definitions |
| `core/coreapi/` | Go API implementation |
| `client/rpc/` | HTTP RPC client |
| `plugin/` | plugin system |
| `repo/` | repository management |
| `test/cli/` | Go-based CLI integration tests (preferred for new tests) |
| `test/sharness/` | legacy shell-based integration tests |
| `docs/` | documentation |

Other key external dependencies: [go-libp2p](https://github.com/libp2p/go-libp2p) (networking), [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht) (DHT).

## Go Style

Follow these Go style references:

- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
- [Google Go Style Decisions](https://google.github.io/styleguide/go/decisions)

Specific conventions for this project:

- check the Go version in `go.mod` and use idiomatic features available at that version
- readability over micro-optimization: clear code is more important than saving microseconds
- prefer standard library functions and utilities over writing your own
- use early returns and indent the error flow, not the happy path
- use `slices.Contains`, `slices.DeleteFunc`, and the `maps` package instead of manual loops
- preallocate slices and maps when the size is known: `make([]T, 0, n)`
- use `map[K]struct{}` for sets, not `map[K]bool`
- receiver names: single-letter abbreviations matching the type (e.g., `s *Server`, `c *Client`)
- run `go fmt` after modifying Go source files, never indent manually

### Error Handling

- wrap errors with `fmt.Errorf("context: %w", err)`, never discard errors silently
- use `errors.Is` / `errors.As` for error checking, not string comparison
- never use `panic` in library code; only in `main` or test helpers
- return `nil` explicitly for the error value on success paths

### Canonical Examples

When adding or modifying code, follow the patterns established in these files:

- CLI command structure: `core/commands/dag/dag.go`
- CLI integration test: `test/cli/dag_test.go`
- Test harness usage: `test/cli/harness/` package

## Building

Always run commands from the repository root.

```bash
make mod_tidy         # update go.mod/go.sum (use this instead of go mod tidy)
make build            # build the ipfs binary to cmd/ipfs/ipfs
make install          # install to $GOPATH/bin
make -O test_go_lint  # run linter (use this instead of golangci-lint directly)
```

If you modify `go.mod` (add/remove/update dependencies), you must run `make mod_tidy` first, before building or testing. Use `make mod_tidy` instead of `go mod tidy` directly, as the project has multiple `go.mod` files.

If you modify any `.go` files outside of `test/`, you must run `make build` before running integration tests.

## Testing

The full test suite is composed of several targets:

| Make target | What it runs |
|----------------------|-----------------------------------------------------------------------|
| `make test` | all tests (`test_go_fmt` + `test_unit` + `test_cli` + `test_sharness`) |
| `make test_short` | fast subset (`test_go_fmt` + `test_unit`) |
| `make test_unit` | unit tests with coverage (excludes `test/cli`) |
| `make test_cli` | CLI integration tests (requires `make build` first) |
| `make test_sharness` | legacy shell-based integration tests |
| `make test_go_fmt` | checks Go source formatting |
| `make -O test_go_lint` | runs `golangci-lint` |

During development, prefer running a specific test rather than the full suite:

```bash
# run a single unit test
go test ./core/... -run TestSpecificUnit -v

# run a single CLI integration test (requires make build first)
go test ./test/cli/... -run TestSpecificCLI -v
```

### Environment Setup for Integration Tests

Before running `test_cli` or `test_sharness`, set these environment variables from the repo root:

```bash
export PATH="$PWD/cmd/ipfs:$PATH"
export IPFS_PATH="$(mktemp -d)"
```

- `PATH`: integration tests use the `ipfs` binary from `PATH`, not Go source directly
- `IPFS_PATH`: isolates test data from `~/.ipfs` or other running nodes

If you see "version (N) is lower than repos (M)", the `ipfs` binary in `PATH` is outdated. Rebuild with `make build` and verify `PATH`.

### Running Sharness Tests

Sharness tests are legacy shell-based tests. Run individual tests with a timeout:

```bash
cd test/sharness && timeout 60s ./t0080-repo.sh
```

To investigate a failing test, pass `-v` for verbose output. In this mode, daemons spawned by the test are not shut down automatically and must be killed manually afterwards.

### Cleaning Up Stale Daemons

Before running `test/cli` or `test/sharness`, stop any stale `ipfs daemon` processes owned by the current user. Leftover daemons hold locks and bind ports, causing test failures:

```bash
pkill -f "ipfs daemon"
```

### Writing Tests

- all new integration tests go in `test/cli/`, not `test/sharness/`
- if a `test/sharness` test needs significant changes, remove it and add a replacement in `test/cli/`
- use [testify](https://github.com/stretchr/testify) for assertions (already a dependency)
- for Go 1.25+, use `testing/synctest` when testing concurrent code (goroutines, channels, timers)
- reuse existing `.car` fixtures in `test/cli/fixtures/` when possible; only add new fixtures when the test requires data not covered by existing ones
- always re-run modified tests locally before submitting to confirm they pass
- avoid emojis in test names and test log output

## Before Submitting

Run these steps in order before considering work complete:

1. `make mod_tidy` (if `go.mod` changed)
2. `go fmt ./...`
3. `make build` (if non-test `.go` files changed)
4. `make -O test_go_lint`
5. `go test ./...` (or the relevant subset)

## Documentation and Commit Messages

- after editing CLI help text in `core/commands/`, verify width: `go test ./test/cli/... -run TestCommandDocsWidth`
- config options are documented in `docs/config.md`
- changelogs in `docs/changelogs/`: only edit the Table of Contents and the Highlights section; the Changelog and Contributors sections are auto-generated and must not be modified
- follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- keep commit titles short and messages terse

## Writing Style

When writing docs, comments, and commit messages:

- avoid emojis in code, comments, and log output
- keep an empty line before lists in markdown
- use backticks around CLI commands, paths, environment variables, and config options

## PR Guidelines

- explain what changed and why in the PR description
- include test coverage for new functionality and bug fixes
- run `make -O test_go_lint` and fix any lint issues before submitting
- verify that `go test ./...` passes locally
- when modifying `test/sharness` tests significantly, migrate them to `test/cli` instead
- end the PR description with a `## References` section listing related context, one link per line
- if the PR closes an issue in `ipfs/kubo`, each closing reference should be a bullet starting with `Closes`:

```markdown
## References

- Closes https://github.com/ipfs/kubo/issues/1234
- Closes https://github.com/ipfs/kubo/issues/5678
- https://discuss.ipfs.tech/t/related-topic/999
```

## Scope and Safety

Do not modify or touch:

- files under `test/sharness/lib/` (third-party sharness test framework)
- CI workflows in `.github/` unless explicitly asked
- auto-generated sections in `docs/changelogs/` (Changelog and Contributors are generated; only TOC and Highlights are human-edited)

Do not run without being asked:

- `make test` or `make test_sharness` (full suite is slow; prefer targeted tests)
- `ipfs daemon` without a timeout

## Running the Daemon

Always run the daemon with a timeout or shut it down promptly:

```bash
timeout 60s ipfs daemon  # auto-kill after 60s
ipfs shutdown            # graceful shutdown via API
```

Kill dangling daemons before re-running tests: `pkill -f "ipfs daemon"`
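
The "Before Submitting" checklist in the new AGENTS.md can be chained into a single shell sequence; the line below is just the listed steps run back to back and nothing more.

```bash
# AGENTS.md "Before Submitting" steps as one sequence (skip steps that do not
# apply to your change, e.g. mod_tidy when go.mod is untouched).
make mod_tidy && go fmt ./... && make build && make -O test_go_lint && go test ./...
```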

@@ -1,5 +1,8 @@
# Kubo Changelogs

+- [v0.41](docs/changelogs/v0.41.md)
- [v0.40](docs/changelogs/v0.40.md)
- [v0.39](docs/changelogs/v0.39.md)
- [v0.38](docs/changelogs/v0.38.md)
- [v0.37](docs/changelogs/v0.37.md)
- [v0.36](docs/changelogs/v0.36.md)

@@ -1,6 +1,10 @@
-IPFS as a project, including go-ipfs and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
+# Contributing to Kubo

-We also adhere to the [GO IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information of how to collaborate and contribute in the Go implementation of IPFS.
+**For development setup, building, and testing, see the [Developer Guide](docs/developer-guide.md).**
+
+IPFS as a project, including Kubo and all of its modules, follows the [standard IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md).
+
+We also adhere to the [Go IPFS Community contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING_GO.md) which provide additional information on how to collaborate and contribute to the Go implementation of IPFS.

We appreciate your time and attention for going over these. Please open an issue on ipfs/community if you have any questions.

Dockerfile

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# Enables BuildKit with cache mounts for faster builds
-FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.25 AS builder
+FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.26 AS builder

ARG TARGETOS TARGETARCH
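
The `# syntax=docker/dockerfile:1` directive and the BuildKit comment above imply image builds go through BuildKit; a minimal local build sketch (the `kubo:dev` tag is just an example name, not something defined by the repo):

```bash
# Build the image locally with BuildKit enabled (default in recent Docker;
# the flag is shown explicitly for older installs). The tag name is arbitrary.
DOCKER_BUILDKIT=1 docker build -t kubo:dev .
```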
530
README.md
530
README.md
@ -2,7 +2,7 @@
|
||||
<br>
|
||||
<a href="https://github.com/ipfs/kubo/blob/master/docs/logo/"><img src="https://user-images.githubusercontent.com/157609/250148884-d6d12db8-fdcf-4be3-8546-2550b69845d8.png" alt="Kubo logo" title="Kubo logo" width="200"></a>
|
||||
<br>
|
||||
Kubo: IPFS Implementation in GO
|
||||
Kubo: IPFS Implementation in Go
|
||||
<br>
|
||||
</h1>
|
||||
|
||||
@ -11,111 +11,61 @@
|
||||
<p align="center">
|
||||
<a href="https://ipfs.tech"><img src="https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square" alt="Official Part of IPFS Project"></a>
|
||||
<a href="https://discuss.ipfs.tech"><img alt="Discourse Forum" src="https://img.shields.io/discourse/posts?server=https%3A%2F%2Fdiscuss.ipfs.tech"></a>
|
||||
<a href="https://matrix.to/#/#ipfs-space:ipfs.io"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
|
||||
<a href="https://docs.ipfs.tech/community/"><img alt="Matrix" src="https://img.shields.io/matrix/ipfs-space%3Aipfs.io?server_fqdn=matrix.org"></a>
|
||||
<a href="https://github.com/ipfs/kubo/actions"><img src="https://img.shields.io/github/actions/workflow/status/ipfs/kubo/gobuild.yml?branch=master"></a>
|
||||
<a href="https://github.com/ipfs/kubo/releases"><img alt="GitHub release" src="https://img.shields.io/github/v/release/ipfs/kubo?filter=!*rc*"></a>
|
||||
</p>
|
||||
|
||||
<hr />
|
||||
|
||||
<p align="center">
|
||||
<b><a href="#what-is-kubo">What is Kubo?</a></b> | <b><a href="#quick-taste">Quick Taste</a></b> | <b><a href="#install">Install</a></b> | <b><a href="#documentation">Documentation</a></b> | <b><a href="#development">Development</a></b> | <b><a href="#getting-help">Getting Help</a></b>
|
||||
</p>
|
||||
|
||||
## What is Kubo?
|
||||
|
||||
Kubo was the first IPFS implementation and is the most widely used one today. Implementing the *Interplanetary Filesystem* - the standard for content-addressing on the Web, interoperable with HTTP. Thus powered by future-proof data models and the libp2p for network communication. Kubo is written in Go.
|
||||
Kubo was the first [IPFS](https://docs.ipfs.tech/concepts/what-is-ipfs/) implementation and is the [most widely used one today](https://probelab.io/ipfs/topology/#chart-agent-types-avg). It takes an opinionated approach to content-addressing ([CIDs](https://docs.ipfs.tech/concepts/glossary/#cid), [DAGs](https://docs.ipfs.tech/concepts/glossary/#dag)) that maximizes interoperability: [UnixFS](https://docs.ipfs.tech/concepts/glossary/#unixfs) for files and directories, [HTTP Gateways](https://docs.ipfs.tech/concepts/glossary/#gateway) for web browsers, [Bitswap](https://docs.ipfs.tech/concepts/glossary/#bitswap) and [HTTP](https://specs.ipfs.tech/http-gateways/trustless-gateway/) for verifiable data transfer.
|
||||
|
||||
Featureset
|
||||
- Runs an IPFS-Node as a network service that is part of LAN and WAN DHT
|
||||
- Native support for UnixFS (most popular way to represent files and directories on IPFS)
|
||||
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) (`/ipfs` and `/ipns`) functionality for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
|
||||
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) (`/routing/v1`) client and server implementation for [delegated routing](./docs/delegated-routing.md) lookups
|
||||
- [HTTP Kubo RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) (`/api/v0`) to access and control the daemon
|
||||
- [Command Line Interface](https://docs.ipfs.tech/reference/kubo/cli/) based on (`/api/v0`) RPC API
|
||||
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) to manage the Kubo node
|
||||
- [Content blocking](/docs/content-blocking.md) support for operators of public nodes
|
||||
**Features:**
|
||||
|
||||
### Other implementations
|
||||
- Runs an IPFS node as a network service (LAN [mDNS](https://github.com/libp2p/specs/blob/master/discovery/mdns.md) and WAN [Amino DHT](https://docs.ipfs.tech/concepts/glossary/#dht))
|
||||
- [Command-line interface](https://docs.ipfs.tech/reference/kubo/cli/) (`ipfs --help`)
|
||||
- [WebUI](https://github.com/ipfs/ipfs-webui/#readme) for node management
|
||||
- [HTTP Gateway](https://specs.ipfs.tech/http-gateways/) for trusted and [trustless](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) content retrieval
|
||||
- [HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to control the daemon
|
||||
- [HTTP Routing V1](https://specs.ipfs.tech/routing/http-routing-v1/) client and server for [delegated routing](./docs/delegated-routing.md)
|
||||
- [Content blocking](./docs/content-blocking.md) for public node operators
|
||||
|
||||
See [List](https://docs.ipfs.tech/basics/ipfs-implementations/)
|
||||
**Other IPFS implementations:** [Helia](https://github.com/ipfs/helia) (JavaScript), [more...](https://docs.ipfs.tech/concepts/ipfs-implementations/)
|
||||
|
||||
## What is IPFS?
|
||||
## Quick Taste
|
||||
|
||||
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas from previous systems such as Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single BitTorrent swarm, exchanging git objects. IPFS provides an interface as simple as the HTTP web, but with permanence built-in. You can also mount the world at /ipfs.
|
||||
After [installing Kubo](#install), verify it works:
|
||||
|
||||
For more info see: https://docs.ipfs.tech/concepts/what-is-ipfs/
|
||||
```console
|
||||
$ ipfs init
|
||||
generating ED25519 keypair...done
|
||||
peer identity: 12D3KooWGcSLQdLDBi2BvoP8WnpdHvhWPbxpGcqkf93rL2XMZK7R
|
||||
|
||||
Before opening an issue, consider using one of the following locations to ensure you are opening your thread in the right place:
|
||||
- kubo (previously named go-ipfs) _implementation_ bugs in [this repo](https://github.com/ipfs/kubo/issues).
|
||||
- Documentation issues in [ipfs/docs issues](https://github.com/ipfs/ipfs-docs/issues).
|
||||
- IPFS _design_ in [ipfs/specs issues](https://github.com/ipfs/specs/issues).
|
||||
- Exploration of new ideas in [ipfs/notes issues](https://github.com/ipfs/notes/issues).
|
||||
- Ask questions and meet the rest of the community at the [IPFS Forum](https://discuss.ipfs.tech).
|
||||
- Or [chat with us](https://docs.ipfs.tech/community/chat/).
|
||||
$ ipfs daemon &
|
||||
Daemon is ready
|
||||
|
||||
[](https://www.youtube.com/channel/UCdjsUXJ3QawK4O5L1kqqsew) [](https://twitter.com/IPFS)
|
||||
$ echo "hello IPFS" | ipfs add -q --cid-version 1
|
||||
bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
|
||||
|
||||
## Next milestones
|
||||
$ ipfs cat bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa
|
||||
hello IPFS
|
||||
```
|
||||
|
||||
[Milestones on GitHub](https://github.com/ipfs/kubo/milestones)
|
||||
Verify this CID is provided by your node to the IPFS network: <https://check.ipfs.network/?cid=bafkreicouv3sksjuzxb3rbb6rziy6duakk2aikegsmtqtz5rsuppjorxsa>
|
||||
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [What is Kubo?](#what-is-kubo)
|
||||
- [What is IPFS?](#what-is-ipfs)
|
||||
- [Next milestones](#next-milestones)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Security Issues](#security-issues)
|
||||
- [Install](#install)
|
||||
- [Minimal System Requirements](#minimal-system-requirements)
|
||||
- [Docker](#docker)
|
||||
- [Official prebuilt binaries](#official-prebuilt-binaries)
|
||||
- [Updating](#updating)
|
||||
- [Downloading builds using IPFS](#downloading-builds-using-ipfs)
|
||||
- [Unofficial Linux packages](#unofficial-linux-packages)
|
||||
- [ArchLinux](#arch-linux)
|
||||
- [Gentoo Linux](#gentoo-linux)
|
||||
- [Nix](#nix)
|
||||
- [Solus](#solus)
|
||||
- [openSUSE](#opensuse)
|
||||
- [Guix](#guix)
|
||||
- [Snap](#snap)
|
||||
- [Ubuntu PPA](#ubuntu-ppa)
|
||||
- [Fedora](#fedora-copr)
|
||||
- [Unofficial Windows packages](#unofficial-windows-packages)
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [Scoop](#scoop)
|
||||
- [Unofficial MacOS packages](#unofficial-macos-packages)
|
||||
- [MacPorts](#macports)
|
||||
- [Nix](#nix-macos)
|
||||
- [Homebrew](#homebrew)
|
||||
- [Build from Source](#build-from-source)
|
||||
- [Install Go](#install-go)
|
||||
- [Download and Compile IPFS](#download-and-compile-ipfs)
|
||||
- [Cross Compiling](#cross-compiling)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Usage](#usage)
|
||||
- [Some things to try](#some-things-to-try)
|
||||
- [Troubleshooting](#troubleshooting-1)
|
||||
- [Packages](#packages)
|
||||
- [Development](#development)
|
||||
- [Map of Implemented Subsystems](#map-of-implemented-subsystems)
|
||||
- [CLI, HTTP-API, Architecture Diagram](#cli-http-api-architecture-diagram)
|
||||
- [Testing](#testing)
|
||||
- [Development Dependencies](#development-dependencies)
|
||||
- [Developer Notes](#developer-notes)
|
||||
- [Maintainer Info](#maintainer-info)
|
||||
- [Contributing](#contributing)
|
||||
- [License](#license)
|
||||
|
||||
## Security Issues
|
||||
|
||||
Please follow [`SECURITY.md`](SECURITY.md).
|
||||
See `ipfs add --help` for all import options. Ready for more? Follow the [command-line quick start](https://docs.ipfs.tech/how-to/command-line-quick-start/).
|
||||
|
||||
## Install
|
||||
|
||||
The canonical download instructions for IPFS are over at: https://docs.ipfs.tech/install/. It is **highly recommended** you follow those instructions if you are not interested in working on IPFS development.
|
||||
Follow the [official installation guide](https://docs.ipfs.tech/install/command-line/), or choose: [prebuilt binary](#official-prebuilt-binaries) | [Docker](#docker) | [package manager](#package-managers) | [from source](#build-from-source).
|
||||
|
||||
For production use, Release Docker images (below) are recommended.
|
||||
Prefer a GUI? Try [IPFS Desktop](https://docs.ipfs.tech/install/ipfs-desktop/) and/or [IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/).
|
||||
|
||||
### Minimal System Requirements
|
||||
|
||||
@ -127,388 +77,148 @@ Kubo runs on most Linux, macOS, and Windows systems. For optimal performance, we
|
||||
> [!CAUTION]
|
||||
> Systems with less than the recommended memory may experience instability, frequent OOM errors or restarts, and missing data announcement (reprovider window), which can make data fully or partially inaccessible to other peers. Running Kubo on underprovisioned hardware is at your own risk.
|
||||
|
||||
### Official Prebuilt Binaries
|
||||
|
||||
Download from https://dist.ipfs.tech#kubo or [GitHub Releases](https://github.com/ipfs/kubo/releases/latest).
|
||||
|
||||
### Docker
|
||||
|
||||
Official images are published at https://hub.docker.com/r/ipfs/kubo/: [](https://hub.docker.com/r/ipfs/kubo/)
|
||||
|
||||
#### 🟢 Release Images
|
||||
- These are production grade images. Use them.
|
||||
- `latest` and [`release`](https://hub.docker.com/r/ipfs/kubo/tags?name=release) tags always point at [the latest stable release](https://github.com/ipfs/kubo/releases/latest). If you use this, remember to `docker pull` periodically to update.
|
||||
- [`vN.N.N`](https://hub.docker.com/r/ipfs/kubo/tags?name=v) points at a specific [release tag](https://github.com/ipfs/kubo/releases)
|
||||
|
||||
#### 🟠 Developer Preview Images
|
||||
- These tags are used by developers for internal testing, not intended for end users or production use.
|
||||
- [`master-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-latest) always points at the `HEAD` of the [`master`](https://github.com/ipfs/kubo/commits/master/) branch
|
||||
- [`master-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=master-2) points at a specific commit from the `master` branch
|
||||
Use these for production deployments.
|
||||
|
||||
#### 🔴 Internal Staging Images

- We use `staging` for testing arbitrary commits and experimental patches.
- To build an image for the current HEAD, force push to `staging` via `git push origin HEAD:staging --force`
- [`staging-latest`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-latest) always points at the `HEAD` of the [`staging`](https://github.com/ipfs/kubo/commits/staging/) branch
- [`staging-YYYY-DD-MM-GITSHA`](https://hub.docker.com/r/ipfs/kubo/tags?name=staging-2) points at a specific commit from the `staging` branch
|
||||
|
||||
```console
$ docker pull ipfs/kubo:latest
$ docker run --rm -it --net=host ipfs/kubo:latest
```
|
||||
|
||||
To [customize your node](https://docs.ipfs.tech/install/run-ipfs-inside-docker/#customizing-your-node), pass config via `-e` or mount scripts in `/container-init.d`.

Learn more at https://docs.ipfs.tech/install/run-ipfs-inside-docker/
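For example, a hedged sketch of a customized run; the `IPFS_PROFILE` variable, mounted paths, and port choices below are illustrative rather than the only options the image supports:

```console
$ docker run -d --name ipfs_host \
    -e IPFS_PROFILE=server \
    -v ./init-scripts:/container-init.d \
    -v ipfs_export:/export -v ipfs_data:/data/ipfs \
    -p 4001:4001 -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 \
    ipfs/kubo:latest
```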
|
||||
### Official prebuilt binaries

The official binaries are published at https://dist.ipfs.tech#kubo:

[](https://dist.ipfs.tech#kubo)

From there:
- Click the blue "Download Kubo" on the right side of the page.
- Open/extract the archive.
- Move kubo (`ipfs`) to your path (`install.sh` can do it for you).

If you are unable to access [dist.ipfs.tech](https://dist.ipfs.tech#kubo), you can also download kubo from:
- this project's GitHub [releases](https://github.com/ipfs/kubo/releases/latest) page
- `/ipns/dist.ipfs.tech` at [dweb.link](https://dweb.link/ipns/dist.ipfs.tech#kubo) gateway
|
||||
|
||||
#### Updating
|
||||
|
||||
##### Downloading builds using IPFS
|
||||
|
||||
List the available versions of Kubo implementation:
|
||||
|
||||
```console
|
||||
$ ipfs cat /ipns/dist.ipfs.tech/kubo/versions
|
||||
```
|
||||
|
||||
Then, to view available builds for a version from the previous command (`$VERSION`):
|
||||
|
||||
```console
|
||||
$ ipfs ls /ipns/dist.ipfs.tech/kubo/$VERSION
|
||||
```
|
||||
|
||||
To download a given build of a version:
|
||||
|
||||
```console
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-386.tar.gz # darwin 32-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_darwin-amd64.tar.gz # darwin 64-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_freebsd-amd64.tar.gz # freebsd 64-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-386.tar.gz # linux 32-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-amd64.tar.gz # linux 64-bit build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_linux-arm.tar.gz # linux arm build
|
||||
$ ipfs get /ipns/dist.ipfs.tech/kubo/$VERSION/kubo_$VERSION_windows-amd64.zip # windows 64-bit build
|
||||
```
|
||||
|
||||
### Unofficial Linux packages
|
||||
|
||||
<a href="https://repology.org/project/kubo/versions">
|
||||
<img src="https://repology.org/badge/vertical-allrepos/kubo.svg" alt="Packaging status" align="right">
|
||||
</a>
|
||||
|
||||
- [ArchLinux](#arch-linux)
|
||||
- [Gentoo Linux](#gentoo-linux)
|
||||
- [Nix](#nix-linux)
|
||||
- [Solus](#solus)
|
||||
- [openSUSE](#opensuse)
|
||||
- [Guix](#guix)
|
||||
- [Snap](#snap)
|
||||
- [Ubuntu PPA](#ubuntu-ppa)
|
||||
- [Fedora](#fedora-copr)
|
||||
|
||||
#### Arch Linux
|
||||
|
||||
[](https://wiki.archlinux.org/title/IPFS)
|
||||
|
||||
```bash
|
||||
# pacman -S kubo
|
||||
```
|
||||
|
||||
[](https://archlinux.org/packages/kubo/)
|
||||
|
||||
#### <a name="gentoo-linux">Gentoo Linux</a>
|
||||
|
||||
https://wiki.gentoo.org/wiki/Kubo
|
||||
|
||||
```bash
|
||||
# emerge -a net-p2p/kubo
|
||||
```
|
||||
|
||||
https://packages.gentoo.org/packages/net-p2p/kubo
|
||||
|
||||
#### <a name="nix-linux">Nix</a>
|
||||
|
||||
With the purely functional package manager [Nix](https://nixos.org/nix/) you can install kubo like this:
|
||||
|
||||
```
|
||||
$ nix-env -i kubo
|
||||
```
|
||||
|
||||
You can also install the Package by using its attribute name, which is also `kubo`.
|
||||
|
||||
#### Solus
|
||||
|
||||
[Package for Solus](https://dev.getsol.us/source/kubo/repository/master/)
|
||||
|
||||
```
|
||||
$ sudo eopkg install kubo
|
||||
```
|
||||
|
||||
You can also install it through the Solus software center.
|
||||
|
||||
#### openSUSE
|
||||
|
||||
[Community Package for kubo](https://software.opensuse.org/package/kubo)
|
||||
|
||||
#### Guix
|
||||
|
||||
[Community Package for kubo](https://packages.guix.gnu.org/search/?query=kubo) is available.
|
||||
|
||||
#### Snap
|
||||
|
||||
No longer supported, see rationale in [kubo#8688](https://github.com/ipfs/kubo/issues/8688).
|
||||
|
||||
#### Ubuntu PPA
|
||||
|
||||
[PPA homepage](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) on Launchpad.
|
||||
|
||||
##### Latest Ubuntu (>= 20.04 LTS)
|
||||
```sh
|
||||
sudo add-apt-repository ppa:twdragon/ipfs
|
||||
sudo apt update
|
||||
sudo apt install ipfs-kubo
|
||||
```
|
||||
|
||||
### Fedora COPR
|
||||
|
||||
[`taw00/ipfs-rpm`](https://github.com/taw00/ipfs-rpm)
|
||||
|
||||
##### Any Ubuntu version
|
||||
|
||||
```sh
|
||||
sudo su
|
||||
echo 'deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
|
||||
echo 'deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu <<DISTRO>> main' >> /etc/apt/sources.list.d/ipfs
|
||||
exit
|
||||
sudo apt update
|
||||
sudo apt install ipfs-kubo
|
||||
```
|
||||
where `<<DISTRO>>` is the codename of your Ubuntu distribution (for example, `jammy` for 22.04 LTS). During the first installation the package maintenance script may automatically ask you about which networking profile, CPU accounting model, and/or existing node configuration file you want to use.
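For example, on Ubuntu 22.04 LTS (`jammy`) the two entries written by the commands above would read (a sketch; substitute your own release codename):

```
deb https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu jammy main
deb-src https://ppa.launchpadcontent.net/twdragon/ipfs/ubuntu jammy main
```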
|
||||
|
||||
**NOTE**: this method may also work with any compatible Debian-based distro that ships `libc6` and uses APT as its package manager.
|
||||
|
||||
### Unofficial Windows packages
|
||||
|
||||
- [Chocolatey](#chocolatey)
|
||||
- [Scoop](#scoop)
|
||||
|
||||
#### Chocolatey
|
||||
|
||||
No longer supported, see rationale in [kubo#9341](https://github.com/ipfs/kubo/issues/9341).
|
||||
|
||||
#### Scoop
|
||||
|
||||
Scoop provides kubo as `kubo` in its 'extras' bucket.
|
||||
|
||||
```Powershell
|
||||
PS> scoop bucket add extras
|
||||
PS> scoop install kubo
|
||||
```
|
||||
|
||||
### Unofficial macOS packages
|
||||
|
||||
- [MacPorts](#macports)
|
||||
- [Nix](#nix-macos)
|
||||
- [Homebrew](#homebrew)
|
||||
|
||||
#### MacPorts
|
||||
|
||||
The package [ipfs](https://ports.macports.org/port/ipfs) currently points to kubo and is being maintained.
|
||||
|
||||
```
|
||||
$ sudo port install ipfs
|
||||
```
|
||||
|
||||
#### <a name="nix-macos">Nix</a>
|
||||
|
||||
In macOS you can use the purely functional package manager [Nix](https://nixos.org/nix/):
|
||||
|
||||
```
|
||||
$ nix-env -i kubo
|
||||
```
|
||||
|
||||
You can also install the Package by using its attribute name, which is also `kubo`.
|
||||
|
||||
#### Homebrew
|
||||
|
||||
A Homebrew formula [ipfs](https://formulae.brew.sh/formula/ipfs) is maintained too.
|
||||
|
||||
```
|
||||
$ brew install --formula ipfs
|
||||
```
|
||||
|
||||
|
||||
### Build from Source
|
||||
|
||||

|
||||
|
||||
kubo's build system requires Go and some standard POSIX build tools:
|
||||
|
||||
* GNU make
|
||||
* Git
|
||||
* GCC (or some other go compatible C Compiler) (optional)
|
||||
|
||||
To build without GCC, build with `CGO_ENABLED=0` (e.g., `make build CGO_ENABLED=0`).
|
||||
|
||||
#### Install Go
|
||||
|
||||

|
||||
|
||||
If you need to update: [Download latest version of Go](https://golang.org/dl/).
|
||||
|
||||
You'll need to add Go's bin directories to your `$PATH` environment variable, e.g. by adding these lines to your `/etc/profile` (for a system-wide installation) or `$HOME/.profile`:

```
export PATH=$PATH:/usr/local/go/bin
export PATH=$PATH:$GOPATH/bin
```

```bash
git clone https://github.com/ipfs/kubo.git
cd kubo
make build   # creates cmd/ipfs/ipfs
make install # installs to $GOPATH/bin/ipfs
```
|
||||
|
||||
(If you run into trouble, see the [Go install instructions](https://golang.org/doc/install)).
|
||||
See the [Developer Guide](docs/developer-guide.md) for details, Windows instructions, and troubleshooting.
|
||||
|
||||
#### Download and Compile IPFS

```
$ git clone https://github.com/ipfs/kubo.git
$ cd kubo
$ make install
```

Alternatively, you can run `make build` to build the kubo binary (storing it in `cmd/ipfs/ipfs`) without installing it.

**NOTE:** If you get an error along the lines of "fatal error: stdlib.h: No such file or directory", you're missing a C compiler. Either re-run `make` with `CGO_ENABLED=0` or install GCC.

### Package Managers

Kubo is available in community-maintained packages across many operating systems, Linux distributions, and package managers. See [Repology](https://repology.org/project/kubo/versions) for the full list: [](https://repology.org/project/kubo/versions)

> [!WARNING]
> These packages are maintained by third-party volunteers. The IPFS Project and Kubo maintainers are not responsible for their contents or supply chain security. For increased security, [build from source](#build-from-source).

#### Linux

| Distribution | Install | Version |
|--------------|---------|---------|
| Ubuntu | [PPA](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs): `sudo apt install ipfs-kubo` | [](https://launchpad.net/~twdragon/+archive/ubuntu/ipfs) |
| Arch | `pacman -S kubo` | [](https://archlinux.org/packages/extra/x86_64/kubo/) |
| Fedora | [COPR](https://copr.fedorainfracloud.org/coprs/taw/ipfs/): `dnf install kubo` | [](https://copr.fedorainfracloud.org/coprs/taw/ipfs/) |
| Nix | `nix-env -i kubo` | [](https://search.nixos.org/packages?query=kubo) |
| Gentoo | `emerge -a net-p2p/kubo` | [](https://packages.gentoo.org/packages/net-p2p/kubo) |
| openSUSE | `zypper install kubo` | [](https://software.opensuse.org/package/kubo) |
| Solus | `sudo eopkg install kubo` | [](https://packages.getsol.us/shannon/k/kubo/) |
| Guix | `guix install kubo` | [](https://packages.guix.gnu.org/packages/kubo/) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
|
||||
|
||||
~~Snap~~ no longer supported ([#8688](https://github.com/ipfs/kubo/issues/8688))

##### Cross Compiling

Compiling for a different platform is as simple as running:

```
make build GOOS=myTargetOS GOARCH=myTargetArchitecture
```
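For instance, to produce a Linux arm64 binary from another platform (a sketch; any `GOOS`/`GOARCH` pair supported by Go works the same way, and `CGO_ENABLED=0` sidesteps the need for a C cross-compiler, as noted above):

```
make build GOOS=linux GOARCH=arm64 CGO_ENABLED=0
```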
|
||||
#### macOS

| Manager | Install | Version |
|---------|---------|---------|
| Homebrew | `brew install ipfs` | [](https://formulae.brew.sh/formula/ipfs) |
| MacPorts | `sudo port install ipfs` | [](https://ports.macports.org/port/ipfs/) |
| Nix | `nix-env -i kubo` | [](https://search.nixos.org/packages?query=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |
|
||||
|
||||
#### Troubleshooting

- Separate [instructions are available for building on Windows](docs/windows.md).
- `git` is required in order for `go get` to fetch all dependencies.
- Package managers often contain out-of-date `golang` packages.
  Ensure that `go version` reports at least the minimum version required (see go.mod). See above for how to install Go.
- If you are interested in development, please install the development dependencies as well.
- Shell command completions can be generated with one of the `ipfs commands completion` subcommands. Read [docs/command-completion.md](docs/command-completion.md) to learn more; a minimal example follows this list.
- See the [misc folder](https://github.com/ipfs/kubo/tree/master/misc) for how to connect IPFS to systemd or whatever init system your distro uses.
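As promised above, one way to enable bash completions (the output path is just one common choice; other shells have matching subcommands per docs/command-completion.md):

```console
$ ipfs commands completion bash > ~/.ipfs-completion.bash
$ echo 'source ~/.ipfs-completion.bash' >> ~/.bashrc
```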
|
||||
#### Windows

| Manager | Install | Version |
|---------|---------|---------|
| Scoop | `scoop install kubo` | [](https://scoop.sh/#/apps?q=kubo) |
| _other_ | [See Repology for the full list](https://repology.org/project/kubo/versions) | |

~~Chocolatey~~ no longer supported ([#9341](https://github.com/ipfs/kubo/issues/9341))

## Getting Started
|
||||
|
||||
### Usage

[](https://docs.ipfs.tech/how-to/command-line-quick-start/)
[](https://docs.ipfs.tech/reference/kubo/cli/)

To start using IPFS, you must first initialize IPFS's config files on your system; this is done with `ipfs init`. See `ipfs init --help` for information on the optional arguments it takes. After initialization is complete, you can use `ipfs mount`, `ipfs add` and any of the other commands to explore!
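A minimal first session might look like this (a sketch; the peer ID and addresses in the output will differ on your machine):

```console
$ ipfs init
$ ipfs daemon
# in a second terminal, once the daemon reports it is ready:
$ ipfs id
```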
|
||||
|
||||
For detailed configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
|
||||
|
||||
### Some things to try

Basic proof of 'ipfs working' locally:

```
echo "hello world" > hello
ipfs add hello
# This should output a hash string that looks something like:
# QmT78zSuBmuS4z925WZfrqQ1qHaJ56DQaTfyMUF7F8ff5o
ipfs cat <that hash>
```
|
||||
|
||||
### HTTP/RPC clients
|
||||
|
||||
For programmatic interaction with Kubo, see our [list of HTTP/RPC clients](docs/http-rpc-clients.md).
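If you just want to poke at the RPC API directly before picking a client library, a quick sketch with `curl` (the RPC API listens on `127.0.0.1:5001` by default and only accepts POST requests):

```console
$ curl -X POST http://127.0.0.1:5001/api/v0/version
$ curl -X POST http://127.0.0.1:5001/api/v0/id
```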
|
||||
|
||||
### Troubleshooting

If you have previously installed IPFS and are running into problems getting a newer version to work, try deleting (or backing up somewhere else) your IPFS config directory (~/.ipfs by default) and rerunning `ipfs init`. This will reinitialize the config file to its defaults and clear out the local datastore of any bad entries.
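A hedged sketch of that reset (back up first; the repo directory may differ if you have set `IPFS_PATH`):

```console
$ mv ~/.ipfs ~/.ipfs.backup
$ ipfs init
```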
|
||||
|
||||
For more information about configuration options, see [docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md).
|
||||
|
||||
Please direct general questions and help requests to our [forums](https://discuss.ipfs.tech).
|
||||
|
||||
If you believe you've found a bug, check the [issues list](https://github.com/ipfs/kubo/issues) and, if you don't see your problem there, either come talk to us on [Matrix chat](https://docs.ipfs.tech/community/chat/), or file an issue of your own!
|
||||
|
||||
## Packages

See [IPFS in GO](https://docs.ipfs.tech/reference/go/api/) documentation.

## Documentation

| Topic | Description |
|-------|-------------|
| [Configuration](docs/config.md) | All config options reference |
| [Environment variables](docs/environment-variables.md) | Runtime settings via env vars |
| [Experimental features](docs/experimental-features.md) | Opt-in features in development |
| [HTTP Gateway](docs/gateway.md) | Path, subdomain, and trustless gateway setup |
| [HTTP RPC clients](docs/http-rpc-clients.md) | Client libraries for Go, JS |
| [Delegated routing](docs/delegated-routing.md) | Multi-router and HTTP routing |
| [Metrics & monitoring](docs/metrics.md) | Prometheus metrics |
| [Content blocking](docs/content-blocking.md) | Denylist for public nodes |
| [Customizing](docs/customizing.md) | Unsure whether to use plugins, Boxo, or a fork? |
| [Debug guide](docs/debug-guide.md) | CPU profiles, memory analysis, tracing |
| [Changelogs](docs/changelogs/) | Release notes for each version |
| [All documentation](https://github.com/ipfs/kubo/tree/master/docs) | Full list of docs |
|
||||
|
||||
## Development
|
||||
|
||||
Some places to get you started on the codebase:
|
||||
See the [Developer Guide](docs/developer-guide.md) for build instructions, testing, and contribution workflow. AI coding agents should follow [AGENTS.md](AGENTS.md).
|
||||
|
||||
- Main file: [./cmd/ipfs/main.go](https://github.com/ipfs/kubo/blob/master/cmd/ipfs/main.go)
|
||||
- CLI Commands: [./core/commands/](https://github.com/ipfs/kubo/tree/master/core/commands)
|
||||
- Bitswap (the data trading engine): [go-bitswap](https://github.com/ipfs/go-bitswap)
|
||||
- libp2p
|
||||
- libp2p: https://github.com/libp2p/go-libp2p
|
||||
- DHT: https://github.com/libp2p/go-libp2p-kad-dht
|
||||
- [IPFS : The `Add` command demystified](https://github.com/ipfs/kubo/tree/master/docs/add-code-flow.md)
|
||||
## Getting Help
|
||||
|
||||
### Map of Implemented Subsystems
|
||||
**WIP**: This is a high-level architecture diagram of the various sub-systems of this specific implementation. To be updated with how they interact. Anyone who has suggestions is welcome to comment [here](https://docs.google.com/drawings/d/1OVpBT2q-NtSJqlPX3buvjYhOnWfdzb85YEsM_njesME/edit) on how we can improve this!
|
||||
<img src="https://docs.google.com/drawings/d/e/2PACX-1vS_n1FvSu6mdmSirkBrIIEib2gqhgtatD9awaP2_WdrGN4zTNeg620XQd9P95WT-IvognSxIIdCM5uE/pub?w=1446&h=1036">
|
||||
- [IPFS Forum](https://discuss.ipfs.tech) - community support, questions, and discussion
|
||||
- [Community](https://docs.ipfs.tech/community/) - chat, events, and working groups
|
||||
- [GitHub Issues](https://github.com/ipfs/kubo/issues) - bug reports for Kubo specifically
|
||||
- [IPFS Docs Issues](https://github.com/ipfs/ipfs-docs/issues) - documentation issues
|
||||
|
||||
### CLI, HTTP-API, Architecture Diagram
|
||||
|
||||

|
||||
|
||||
> [Origin](https://github.com/ipfs/pm/pull/678#discussion_r210410924)
|
||||
|
||||
Description: Dotted means "likely going away". The "Legacy" parts are thin wrappers around some commands to translate between the new system and the old system. The grayed-out parts on the "daemon" diagram are there to show that the code is all the same, it's just that we turn some pieces on and some pieces off depending on whether we're running on the client or the server.
|
||||
|
||||
### Testing
|
||||
|
||||
```
|
||||
make test
|
||||
```
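Finer-grained targets are also available (names and descriptions taken from the `Rules.mk` help text shown later in this diff); for example:

```console
$ make test_short   # fast checks: test_go_fmt + test_unit
$ make test_cli     # CLI integration tests (requires a built binary)
```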
|
||||
|
||||
### Development Dependencies
|
||||
|
||||
If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://github.com/google/protobuf).
|
||||
|
||||
### Developer Notes
|
||||
|
||||
Find more documentation for developers on [docs](./docs)
|
||||
|
||||
## Maintainer Info
|
||||
|
||||
Kubo is maintained by [Shipyard](https://ipshipyard.com/).
|
||||
|
||||
* This repository is part of [Shipyard's Go Triage](https://ipshipyard.notion.site/IPFS-Go-Triage-Boxo-Kubo-Rainbow-0ddee6b7f28d412da7dabe4f9107c29a).
|
||||
* [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
|
||||
## Security Issues
|
||||
|
||||
See [`SECURITY.md`](SECURITY.md).
|
||||
|
||||
## Contributing
|
||||
|
||||
[](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md)
|
||||
|
||||
We ❤️ all [our contributors](docs/AUTHORS); this project wouldn’t be what it is without you! If you want to help out, please see [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md).
|
||||
|
||||
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
This repository follows the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
|
||||
Members of the IPFS community provide Kubo support in the [Kubo help category on the discussion forums](https://discuss.ipfs.tech/c/help/help-kubo/23).
|
||||
## Maintainer Info
|
||||
|
||||
Need help with IPFS itself? Learn where to get help and support at https://ipfs.tech/help.
|
||||
<a href="https://ipshipyard.com/"><img align="right" src="https://github.com/user-attachments/assets/39ed3504-bb71-47f6-9bf8-cb9a1698f272" /></a>
|
||||
|
||||
> [!NOTE]
|
||||
> Kubo is maintained by the [Shipyard](https://ipshipyard.com/) team.
|
||||
>
|
||||
> [Release Process](https://ipshipyard.notion.site/Kubo-Release-Process-6dba4f5755c9458ab5685eeb28173778)
|
||||
|
||||
## License
|
||||
|
||||
This project is dual-licensed under Apache 2.0 and MIT terms:

- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/ipfs/kubo/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](https://github.com/ipfs/kubo/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)
|
||||
|
||||
15
Rules.mk
15
Rules.mk
@ -134,15 +134,14 @@ help:
|
||||
@echo ''
|
||||
@echo 'TESTING TARGETS:'
|
||||
@echo ''
|
||||
@echo ' test - Run all tests'
|
||||
@echo ' test_short - Run short go tests and short sharness tests'
|
||||
@echo ' test_go_short - Run short go tests'
|
||||
@echo ' test_go_test - Run all go tests'
|
||||
@echo ' test - Run all tests (test_go_fmt, test_unit, test_cli, test_sharness)'
|
||||
@echo ' test_short - Run fast tests (test_go_fmt, test_unit)'
|
||||
@echo ' test_unit - Run unit tests with coverage (excludes test/cli)'
|
||||
@echo ' test_cli - Run CLI integration tests (requires built binary)'
|
||||
@echo ' test_go_fmt - Check Go source formatting'
|
||||
@echo ' test_go_build - Build kubo for all platforms from .github/build-platforms.yml'
|
||||
@echo ' test_go_expensive - Run all go tests and build all platforms'
|
||||
@echo ' test_go_race - Run go tests with the race detector enabled'
|
||||
@echo ' test_go_lint - Run the `golangci-lint` vetting tool'
|
||||
@echo ' test_go_lint - Run golangci-lint'
|
||||
@echo ' test_sharness - Run sharness tests'
|
||||
@echo ' coverage - Collects coverage info from unit tests and sharness'
|
||||
@echo ' coverage - Collect coverage info from unit tests and sharness'
|
||||
@echo
|
||||
.PHONY: help
|
||||
|
||||
@ -50,6 +50,6 @@ else
|
||||
unset IPFS_SWARM_KEY_FILE
|
||||
fi
|
||||
|
||||
find /container-init.d -maxdepth 1 -type f -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
|
||||
find /container-init.d -maxdepth 1 \( -type f -o -type l \) -iname '*.sh' -print0 | sort -z | xargs -n 1 -0 -r container_init_run
|
||||
|
||||
exec ipfs "$@"
|
||||
|
||||
@ -29,12 +29,10 @@ GIT_BRANCH=${3:-$(git symbolic-ref -q --short HEAD || echo "unknown")}
|
||||
GIT_TAG=${4:-$(git describe --tags --exact-match 2> /dev/null || echo "")}
|
||||
|
||||
IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo}
|
||||
LEGACY_IMAGE_NAME=${LEGACY_IMAGE_NAME:-ipfs/go-ipfs}
|
||||
|
||||
echoImageName () {
|
||||
local IMAGE_TAG=$1
|
||||
echo "$IMAGE_NAME:$IMAGE_TAG"
|
||||
echo "$LEGACY_IMAGE_NAME:$IMAGE_TAG"
|
||||
}
|
||||
|
||||
if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
|
||||
@ -43,7 +41,7 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
|
||||
elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echoImageName "$GIT_TAG"
|
||||
echoImageName "latest"
|
||||
echoImageName "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
|
||||
echoImageName "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981
|
||||
|
||||
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
|
||||
# sanitize the branch name since docker tags have stricter char limits than git branch names
|
||||
|
||||
532
bin/mkreleaselog
532
bin/mkreleaselog
@ -1,10 +1,19 @@
|
||||
#!/bin/zsh
|
||||
#!/bin/bash
|
||||
#
|
||||
# Invocation: mkreleaselog [FIRST_REF [LAST_REF]]
|
||||
#
|
||||
# Generates release notes with contributor statistics, deduplicating by GitHub handle.
|
||||
# GitHub handles are resolved from:
|
||||
# 1. GitHub noreply emails (user@users.noreply.github.com)
|
||||
# 2. Merge commit messages (Merge pull request #N from user/branch)
|
||||
# 3. GitHub API via gh CLI (for squash merges)
|
||||
#
|
||||
# Results are cached in ~/.cache/mkreleaselog/github-handles.json
|
||||
|
||||
set -euo pipefail
|
||||
export GO111MODULE=on
|
||||
export GOPATH="$(go env GOPATH)"
|
||||
GOPATH="$(go env GOPATH)"
|
||||
export GOPATH
|
||||
|
||||
# List of PCRE regular expressions to match "included" modules.
|
||||
INCLUDE_MODULES=(
|
||||
@ -15,10 +24,15 @@ INCLUDE_MODULES=(
|
||||
"^github.com/multiformats/"
|
||||
"^github.com/filecoin-project/"
|
||||
"^github.com/ipfs-shipyard/"
|
||||
"^github.com/ipshipyard/"
|
||||
"^github.com/probe-lab/"
|
||||
|
||||
# Authors of personal modules used by go-ipfs that should be mentioned in the
|
||||
# release notes.
|
||||
"^github.com/whyrusleeping/"
|
||||
"^github.com/gammazero/"
|
||||
"^github.com/Jorropo/"
|
||||
"^github.com/guillaumemichel/"
|
||||
"^github.com/Kubuxu/"
|
||||
"^github.com/jbenet/"
|
||||
"^github.com/Stebalien/"
|
||||
@ -48,15 +62,348 @@ IGNORE_FILES=(
|
||||
)
|
||||
|
||||
##########################################################################################
|
||||
# GitHub Handle Resolution Infrastructure
|
||||
##########################################################################################
|
||||
|
||||
# Cache location following XDG spec
|
||||
GITHUB_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/mkreleaselog"
|
||||
GITHUB_CACHE_FILE="$GITHUB_CACHE_DIR/github-handles.json"
|
||||
|
||||
# Timeout for gh CLI commands (seconds)
|
||||
GH_TIMEOUT=10
|
||||
|
||||
# Associative array for email -> github handle mapping (runtime cache)
|
||||
declare -A EMAIL_TO_GITHUB
|
||||
|
||||
# Check if gh CLI is available and authenticated
|
||||
gh_available() {
|
||||
command -v gh >/dev/null 2>&1 && gh auth status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# Load cached email -> github handle mappings from disk
|
||||
load_github_cache() {
|
||||
EMAIL_TO_GITHUB=()
|
||||
|
||||
if [[ ! -f "$GITHUB_CACHE_FILE" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Validate JSON before loading
|
||||
if ! jq -e '.' "$GITHUB_CACHE_FILE" >/dev/null 2>&1; then
|
||||
msg "Warning: corrupted cache file, ignoring"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local email handle
|
||||
while IFS=$'\t' read -r email handle; do
|
||||
# Validate handle format (alphanumeric, hyphens, max 39 chars)
|
||||
if [[ -n "$email" && -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
fi
|
||||
done < <(jq -r 'to_entries[] | "\(.key)\t\(.value)"' "$GITHUB_CACHE_FILE" 2>/dev/null)
|
||||
|
||||
msg "Loaded ${#EMAIL_TO_GITHUB[@]} cached GitHub handle mappings"
|
||||
}
|
||||
|
||||
# Save email -> github handle mappings to disk (atomic write)
|
||||
save_github_cache() {
|
||||
if [[ ${#EMAIL_TO_GITHUB[@]} -eq 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
mkdir -p "$GITHUB_CACHE_DIR"
|
||||
|
||||
local tmp_file
|
||||
tmp_file="$(mktemp "$GITHUB_CACHE_DIR/cache.XXXXXX")" || return 1
|
||||
|
||||
# Build JSON from associative array
|
||||
{
|
||||
echo "{"
|
||||
local first=true
|
||||
local key
|
||||
for key in "${!EMAIL_TO_GITHUB[@]}"; do
|
||||
if [[ "$first" == "true" ]]; then
|
||||
first=false
|
||||
else
|
||||
echo ","
|
||||
fi
|
||||
# Escape special characters in email for JSON
|
||||
printf ' %s: %s' "$(jq -n --arg e "$key" '$e')" "$(jq -n --arg h "${EMAIL_TO_GITHUB[$key]}" '$h')"
|
||||
done
|
||||
echo
|
||||
echo "}"
|
||||
} > "$tmp_file"
|
||||
|
||||
# Validate before replacing
|
||||
if jq -e '.' "$tmp_file" >/dev/null 2>&1; then
|
||||
mv "$tmp_file" "$GITHUB_CACHE_FILE"
|
||||
msg "Saved ${#EMAIL_TO_GITHUB[@]} GitHub handle mappings to cache"
|
||||
else
|
||||
rm -f "$tmp_file"
|
||||
msg "Warning: failed to save cache (invalid JSON)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Extract GitHub handle from email if it's a GitHub noreply address
|
||||
# Handles: user@users.noreply.github.com and 12345678+user@users.noreply.github.com
|
||||
extract_handle_from_noreply() {
|
||||
local email="$1"
|
||||
|
||||
if [[ "$email" =~ ^([0-9]+\+)?([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)@users\.noreply\.github\.com$ ]]; then
|
||||
echo "${BASH_REMATCH[2]}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Extract GitHub handle from merge commit subject
|
||||
# Handles: "Merge pull request #123 from username/branch"
|
||||
extract_handle_from_merge_commit() {
|
||||
local subject="$1"
|
||||
|
||||
if [[ "$subject" =~ ^Merge\ pull\ request\ \#[0-9]+\ from\ ([a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?)/.*$ ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Extract PR number from commit subject
|
||||
# Handles: "Subject (#123)" and "Merge pull request #123 from"
|
||||
extract_pr_number() {
|
||||
local subject="$1"
|
||||
|
||||
if [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
return 0
|
||||
elif [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Query GitHub API for PR author (with timeout and error handling)
|
||||
query_pr_author() {
|
||||
local gh_repo="$1" # e.g., "ipfs/kubo"
|
||||
local pr_num="$2"
|
||||
|
||||
if ! gh_available; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
local handle
|
||||
handle="$(timeout "$GH_TIMEOUT" gh pr view "$pr_num" --repo "$gh_repo" --json author -q '.author.login' 2>/dev/null)" || return 1
|
||||
|
||||
# Validate handle format
|
||||
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Query GitHub API for commit author (fallback when no PR available)
|
||||
query_commit_author() {
|
||||
local gh_repo="$1" # e.g., "ipfs/kubo"
|
||||
local commit_sha="$2"
|
||||
|
||||
if ! gh_available; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
local handle
|
||||
handle="$(timeout "$GH_TIMEOUT" gh api "/repos/$gh_repo/commits/$commit_sha" --jq '.author.login // empty' 2>/dev/null)" || return 1
|
||||
|
||||
# Validate handle format
|
||||
if [[ -n "$handle" && "$handle" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]{0,37}[a-zA-Z0-9])?$ ]]; then
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# Resolve email to GitHub handle using all available methods
|
||||
# Args: email, commit_hash (optional), repo_dir (optional), gh_repo (optional)
|
||||
resolve_github_handle() {
|
||||
local email="$1"
|
||||
local commit="${2:-}"
|
||||
local repo_dir="${3:-}"
|
||||
local gh_repo="${4:-}"
|
||||
|
||||
# Skip empty emails
|
||||
[[ -z "$email" ]] && return 1
|
||||
|
||||
# Check runtime cache first
|
||||
if [[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]]; then
|
||||
echo "${EMAIL_TO_GITHUB[$email]}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local handle=""
|
||||
|
||||
# Method 1: Extract from noreply email
|
||||
if handle="$(extract_handle_from_noreply "$email")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Method 2: Look at commit message for merge commit pattern
|
||||
if [[ -n "$commit" && -n "$repo_dir" ]]; then
|
||||
local subject
|
||||
subject="$(git -C "$repo_dir" log -1 --format='%s' "$commit" 2>/dev/null)" || true
|
||||
|
||||
if [[ -n "$subject" ]]; then
|
||||
if handle="$(extract_handle_from_merge_commit "$subject")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Method 3: Query GitHub API for PR author
|
||||
if [[ -n "$gh_repo" ]]; then
|
||||
local pr_num
|
||||
if pr_num="$(extract_pr_number "$subject")"; then
|
||||
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
echo "$handle"
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Build GitHub handle mappings for all commits in a range
|
||||
# This does a single pass to collect PR numbers, then batch queries them
|
||||
build_github_mappings() {
|
||||
local module="$1"
|
||||
local start="$2"
|
||||
local end="${3:-HEAD}"
|
||||
local repo
|
||||
repo="$(strip_version "$module")"
|
||||
local dir
|
||||
local gh_repo=""
|
||||
|
||||
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
|
||||
dir="$ROOT_DIR"
|
||||
else
|
||||
dir="$GOPATH/src/$repo"
|
||||
fi
|
||||
|
||||
# Extract gh_repo for API calls (e.g., "ipfs/kubo" from "github.com/ipfs/kubo")
|
||||
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
|
||||
gh_repo="${BASH_REMATCH[1]}"
|
||||
fi
|
||||
|
||||
msg "Building GitHub handle mappings for $module..."
|
||||
|
||||
# Collect all unique emails and their commit context
|
||||
declare -A email_commits=()
|
||||
local hash email subject
|
||||
|
||||
while IFS=$'\t' read -r hash email subject; do
|
||||
[[ -z "$email" ]] && continue
|
||||
|
||||
# Skip if already resolved
|
||||
[[ -n "${EMAIL_TO_GITHUB[$email]:-}" ]] && continue
|
||||
|
||||
# Try to resolve without API first
|
||||
local handle=""
|
||||
|
||||
# Method 1: noreply email
|
||||
if handle="$(extract_handle_from_noreply "$email")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Method 2: merge commit message
|
||||
if handle="$(extract_handle_from_merge_commit "$subject")"; then
|
||||
EMAIL_TO_GITHUB["$email"]="$handle"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Store for potential API lookup
|
||||
if [[ -z "${email_commits[$email]:-}" ]]; then
|
||||
email_commits["$email"]="$hash"
|
||||
fi
|
||||
done < <(git -C "$dir" log --format='tformat:%H%x09%aE%x09%s' --no-merges "$start..$end" 2>/dev/null)
|
||||
|
||||
# API batch lookup for remaining emails (if gh is available)
|
||||
if gh_available && [[ -n "$gh_repo" && ${#email_commits[@]} -gt 0 ]]; then
|
||||
msg "Querying GitHub API for ${#email_commits[@]} unknown contributors..."
|
||||
local key
|
||||
for key in "${!email_commits[@]}"; do
|
||||
# Skip if already resolved
|
||||
[[ -n "${EMAIL_TO_GITHUB[$key]:-}" ]] && continue
|
||||
|
||||
local commit_hash="${email_commits[$key]}"
|
||||
local subj handle
|
||||
subj="$(git -C "$dir" log -1 --format='%s' "$commit_hash" 2>/dev/null)" || true
|
||||
|
||||
# Try PR author lookup first (cheaper API call)
|
||||
local pr_num
|
||||
if pr_num="$(extract_pr_number "$subj")"; then
|
||||
if handle="$(query_pr_author "$gh_repo" "$pr_num")"; then
|
||||
EMAIL_TO_GITHUB["$key"]="$handle"
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
|
||||
# Fallback: commit author API (works for any commit)
|
||||
if handle="$(query_commit_author "$gh_repo" "$commit_hash")"; then
|
||||
EMAIL_TO_GITHUB["$key"]="$handle"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
##########################################################################################
|
||||
# Original infrastructure with modifications
|
||||
##########################################################################################
|
||||
|
||||
build_include_regex() {
|
||||
local result=""
|
||||
local mod
|
||||
for mod in "${INCLUDE_MODULES[@]}"; do
|
||||
if [[ -n "$result" ]]; then
|
||||
result="$result|$mod"
|
||||
else
|
||||
result="$mod"
|
||||
fi
|
||||
done
|
||||
echo "($result)"
|
||||
}
|
||||
|
||||
build_exclude_regex() {
|
||||
local result=""
|
||||
local mod
|
||||
for mod in "${EXCLUDE_MODULES[@]}"; do
|
||||
if [[ -n "$result" ]]; then
|
||||
result="$result|$mod"
|
||||
else
|
||||
result="$mod"
|
||||
fi
|
||||
done
|
||||
if [[ -n "$result" ]]; then
|
||||
echo "($result)"
|
||||
else
|
||||
echo '$^' # match nothing
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ ${#INCLUDE_MODULES[@]} -gt 0 ]]; then
|
||||
INCLUDE_REGEX="(${$(printf "|%s" "${INCLUDE_MODULES[@]}"):1})"
|
||||
INCLUDE_REGEX="$(build_include_regex)"
|
||||
else
|
||||
INCLUDE_REGEX="" # "match anything"
|
||||
fi
|
||||
|
||||
if [[ ${#EXCLUDE_MODULES[@]} -gt 0 ]]; then
|
||||
EXCLUDE_REGEX="(${$(printf "|%s" "${EXCLUDE_MODULES[@]}"):1})"
|
||||
EXCLUDE_REGEX="$(build_exclude_regex)"
|
||||
else
|
||||
EXCLUDE_REGEX='$^' # "match nothing"
|
||||
fi
|
||||
@ -71,8 +418,6 @@ NL=$'\n'
|
||||
|
||||
ROOT_DIR="$(git rev-parse --show-toplevel)"
|
||||
|
||||
alias jq="jq --unbuffered"
|
||||
|
||||
msg() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
@ -80,11 +425,21 @@ msg() {
|
||||
statlog() {
|
||||
local module="$1"
|
||||
local rpath
|
||||
local gh_repo=""
|
||||
|
||||
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
|
||||
rpath="$ROOT_DIR"
|
||||
else
|
||||
rpath="$GOPATH/src/$(strip_version "$module")"
|
||||
fi
|
||||
|
||||
# Extract gh_repo for API calls
|
||||
local repo
|
||||
repo="$(strip_version "$module")"
|
||||
if [[ "$repo" =~ ^github\.com/(.+)$ ]]; then
|
||||
gh_repo="${BASH_REMATCH[1]}"
|
||||
fi
|
||||
|
||||
local start="${2:-}"
|
||||
local end="${3:-HEAD}"
|
||||
local mailmap_file="$rpath/.mailmap"
|
||||
@ -93,18 +448,21 @@ statlog() {
|
||||
fi
|
||||
|
||||
local stack=()
|
||||
git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}" | while read -r line; do
|
||||
local line
|
||||
while read -r line; do
|
||||
if [[ -n "$line" ]]; then
|
||||
stack+=("$line")
|
||||
continue
|
||||
fi
|
||||
|
||||
local changes
|
||||
read -r changes
|
||||
|
||||
changed=0
|
||||
insertions=0
|
||||
deletions=0
|
||||
while read count event; do
|
||||
local changed=0
|
||||
local insertions=0
|
||||
local deletions=0
|
||||
local count event
|
||||
while read -r count event; do
|
||||
if [[ "$event" =~ ^file ]]; then
|
||||
changed=$count
|
||||
elif [[ "$event" =~ ^insertion ]]; then
|
||||
@ -117,27 +475,32 @@ statlog() {
|
||||
fi
|
||||
done<<<"${changes//,/$NL}"
|
||||
|
||||
local author
|
||||
for author in "${stack[@]}"; do
|
||||
local hash name email
|
||||
IFS=$'\t' read -r hash name email <<<"$author"
|
||||
|
||||
# Resolve GitHub handle
|
||||
local github_handle=""
|
||||
github_handle="$(resolve_github_handle "$email" "$hash" "$rpath" "$gh_repo")" || true
|
||||
|
||||
jq -n \
|
||||
--arg "hash" "$hash" \
|
||||
--arg "name" "$name" \
|
||||
--arg "email" "$email" \
|
||||
--arg "github" "$github_handle" \
|
||||
--argjson "changed" "$changed" \
|
||||
--argjson "insertions" "$insertions" \
|
||||
--argjson "deletions" "$deletions" \
|
||||
'{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
|
||||
'{Commit: $hash, Author: $name, Email: $email, GitHub: $github, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
|
||||
done
|
||||
stack=()
|
||||
done
|
||||
done < <(git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" -- . "${IGNORE_FILES_PATHSPEC[@]}")
|
||||
}
|
||||
|
||||
# Returns a stream of deps changed between $1 and $2.
|
||||
dep_changes() {
|
||||
{
|
||||
<"$1"
|
||||
<"$2"
|
||||
} | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
|
||||
cat "$1" "$2" | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
|
||||
}
|
||||
|
||||
# resolve_commits resolves a git ref for each version.
|
||||
@ -165,12 +528,11 @@ ignored_commit() {
|
||||
|
||||
# Generate a release log for a range of commits in a single repo.
|
||||
release_log() {
|
||||
setopt local_options BASH_REMATCH
|
||||
|
||||
local module="$1"
|
||||
local start="$2"
|
||||
local end="${3:-HEAD}"
|
||||
local repo="$(strip_version "$1")"
|
||||
local repo
|
||||
repo="$(strip_version "$1")"
|
||||
local dir
|
||||
if [[ "$module" == "github.com/ipfs/kubo" ]]; then
|
||||
dir="$ROOT_DIR"
|
||||
@ -178,28 +540,25 @@ release_log() {
|
||||
dir="$GOPATH/src/$repo"
|
||||
fi
|
||||
|
||||
local commit pr
|
||||
git -C "$dir" log \
|
||||
--format='tformat:%H %s' \
|
||||
--first-parent \
|
||||
"$start..$end" |
|
||||
while read commit subject; do
|
||||
# Skip commits that only touch ignored files.
|
||||
if ignored_commit "$dir" "$commit"; then
|
||||
continue
|
||||
fi
|
||||
local commit subject
|
||||
while read -r commit subject; do
|
||||
# Skip commits that only touch ignored files.
|
||||
if ignored_commit "$dir" "$commit"; then
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then
|
||||
local prnum="${BASH_REMATCH[2]}"
|
||||
local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
|
||||
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
|
||||
elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then
|
||||
local prnum="${BASH_REMATCH[2]}"
|
||||
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
|
||||
else
|
||||
printf -- "- %s\n" "$subject"
|
||||
fi
|
||||
done
|
||||
if [[ "$subject" =~ ^Merge\ pull\ request\ \#([0-9]+)\ from ]]; then
|
||||
local prnum="${BASH_REMATCH[1]}"
|
||||
local desc
|
||||
desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
|
||||
printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
|
||||
elif [[ "$subject" =~ \(#([0-9]+)\)$ ]]; then
|
||||
local prnum="${BASH_REMATCH[1]}"
|
||||
printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
|
||||
else
|
||||
printf -- "- %s\n" "$subject"
|
||||
fi
|
||||
done < <(git -C "$dir" log --format='tformat:%H %s' --first-parent "$start..$end")
|
||||
}
|
||||
|
||||
indent() {
|
||||
@ -211,7 +570,8 @@ mod_deps() {
|
||||
}
|
||||
|
||||
ensure() {
|
||||
local repo="$(strip_version "$1")"
|
||||
local repo
|
||||
repo="$(strip_version "$1")"
|
||||
local commit="$2"
|
||||
local rpath
|
||||
if [[ "$1" == "github.com/ipfs/kubo" ]]; then
|
||||
@ -232,14 +592,27 @@ ensure() {
|
||||
git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1
|
||||
}
|
||||
|
||||
# Summarize stats, grouping by GitHub handle (with fallback to email for dedup)
|
||||
statsummary() {
|
||||
jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' |
|
||||
jq '. + {Lines: (.Deletions + .Insertions)}'
|
||||
jq -s '
|
||||
# Group by GitHub handle if available, otherwise by email
|
||||
group_by(if .GitHub != "" then .GitHub else .Email end)[] |
|
||||
{
|
||||
# Use first non-empty GitHub handle, or fall back to Author name
|
||||
Author: .[0].Author,
|
||||
GitHub: (map(select(.GitHub != "")) | .[0].GitHub // ""),
|
||||
Email: .[0].Email,
|
||||
Commits: (. | length),
|
||||
Insertions: (map(.Insertions) | add),
|
||||
Deletions: (map(.Deletions) | add),
|
||||
Files: (map(.Files) | add)
|
||||
}
|
||||
' | jq '. + {Lines: (.Deletions + .Insertions)}'
|
||||
}
|
||||
|
||||
strip_version() {
|
||||
local repo="$1"
|
||||
if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then
|
||||
if [[ "$repo" =~ .*/v[0-9]+$ ]]; then
|
||||
repo="$(dirname "$repo")"
|
||||
fi
|
||||
echo "$repo"
|
||||
@ -248,16 +621,24 @@ strip_version() {
|
||||
recursive_release_log() {
|
||||
local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}"
|
||||
local end="${2:-$(git rev-parse HEAD)}"
|
||||
local repo_root="$(git rev-parse --show-toplevel)"
|
||||
local module="$(go list -m)"
|
||||
local dir="$(go list -m -f '{{.Dir}}')"
|
||||
local repo_root
|
||||
repo_root="$(git rev-parse --show-toplevel)"
|
||||
local module
|
||||
module="$(go list -m)"
|
||||
local dir
|
||||
dir="$(go list -m -f '{{.Dir}}')"
|
||||
|
||||
# Load cached GitHub handle mappings
|
||||
load_github_cache
|
||||
|
||||
# Kubo can be run from any directory, dependencies still use GOPATH
|
||||
|
||||
(
|
||||
local result=0
|
||||
local workspace="$(mktemp -d)"
|
||||
trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT
|
||||
local workspace
|
||||
workspace="$(mktemp -d)"
|
||||
# shellcheck disable=SC2064
|
||||
trap "rm -rf '$workspace'" INT TERM EXIT
|
||||
cd "$workspace"
|
||||
|
||||
echo "Computing old deps..." >&2
|
||||
@ -272,6 +653,9 @@ recursive_release_log() {
|
||||
|
||||
printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2
|
||||
|
||||
# Pre-build GitHub mappings for main module
|
||||
build_github_mappings "$module" "$start" "$end"
|
||||
|
||||
echo "### 📝 Changelog"
|
||||
echo
|
||||
echo "<details><summary>Full Changelog</summary>"
|
||||
@ -282,24 +666,26 @@ recursive_release_log() {
|
||||
|
||||
statlog "$module" "$start" "$end" > statlog.json
|
||||
|
||||
dep_changes old_deps.json new_deps.json |
|
||||
local dep_module new new_ref old old_ref
|
||||
while read -r dep_module new new_ref old old_ref; do
|
||||
if ! ensure "$dep_module" "$new_ref"; then
|
||||
result=1
|
||||
local changelog="failed to fetch repo"
|
||||
else
|
||||
# Pre-build GitHub mappings for dependency
|
||||
build_github_mappings "$dep_module" "$old_ref" "$new_ref"
|
||||
statlog "$dep_module" "$old_ref" "$new_ref" >> statlog.json
|
||||
local changelog
|
||||
changelog="$(release_log "$dep_module" "$old_ref" "$new_ref")"
|
||||
fi
|
||||
if [[ -n "$changelog" ]]; then
|
||||
printf -- "- %s (%s -> %s):\n" "$dep_module" "$old" "$new"
|
||||
echo "$changelog" | indent
|
||||
fi
|
||||
done < <(dep_changes old_deps.json new_deps.json |
|
||||
jq --arg inc "$INCLUDE_REGEX" --arg exc "$EXCLUDE_REGEX" \
|
||||
'select(.Path | test($inc)) | select(.Path | test($exc) | not)' |
|
||||
# Compute changelogs
|
||||
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' |
|
||||
while read module new new_ref old old_ref; do
|
||||
if ! ensure "$module" "$new_ref"; then
|
||||
result=1
|
||||
local changelog="failed to fetch repo"
|
||||
else
|
||||
statlog "$module" "$old_ref" "$new_ref" >> statlog.json
|
||||
local changelog="$(release_log "$module" "$old_ref" "$new_ref")"
|
||||
fi
|
||||
if [[ -n "$changelog" ]]; then
|
||||
printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new"
|
||||
echo "$changelog" | indent
|
||||
fi
|
||||
done
|
||||
jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"')
|
||||
|
||||
echo
|
||||
echo "</details>"
|
||||
@ -311,8 +697,18 @@ recursive_release_log() {
|
||||
echo "|-------------|---------|---------|---------------|"
|
||||
statsummary <statlog.json |
|
||||
jq -s 'sort_by(.Lines) | reverse | .[]' |
|
||||
jq -r '"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"'
|
||||
return "$status"
|
||||
jq -r '
|
||||
if .GitHub != "" then
|
||||
"| [@\(.GitHub)](https://github.com/\(.GitHub)) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
|
||||
else
|
||||
"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"
|
||||
end
|
||||
'
|
||||
|
||||
# Save cache before exiting
|
||||
save_github_cache
|
||||
|
||||
return "$result"
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@ -7,7 +7,7 @@
|
||||
# Run from ci to tag images based on the current branch or tag name.
|
||||
# A bit like dockerhub autobuild config, but somewhere we can version control it.
|
||||
#
|
||||
# The `docker-build` job builds the current commit in docker and tags it as ipfs/go-ipfs:wip
|
||||
# The `docker-build` job builds the current commit in docker and tags it as ipfs/kubo:wip
|
||||
#
|
||||
# Then the `docker-publish` job runs this script to decide what tag, if any,
|
||||
# to publish to dockerhub.
|
||||
@ -42,7 +42,7 @@ GIT_TAG=${4:-$(git describe --tags --exact-match || echo "")}
|
||||
DRY_RUN=${5:-false}
|
||||
|
||||
WIP_IMAGE_TAG=${WIP_IMAGE_TAG:-wip}
|
||||
IMAGE_NAME=${IMAGE_NAME:-ipfs/go-ipfs}
|
||||
IMAGE_NAME=${IMAGE_NAME:-ipfs/kubo}
|
||||
|
||||
pushTag () {
|
||||
local IMAGE_TAG=$1
|
||||
@ -63,7 +63,7 @@ if [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+-rc ]]; then
|
||||
elif [[ $GIT_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
pushTag "$GIT_TAG"
|
||||
pushTag "latest"
|
||||
pushTag "release" # see: https://github.com/ipfs/go-ipfs/issues/3999#issuecomment-742228981
|
||||
pushTag "release" # see: https://github.com/ipfs/kubo/issues/3999#issuecomment-742228981
|
||||
|
||||
elif [[ $GIT_BRANCH =~ ^bifrost-.* ]]; then
|
||||
# sanitize the branch name since docker tags have stricter char limits than git branch names
|
||||
|
||||
@ -34,10 +34,10 @@ type RmBlocksOpts struct {
|
||||
// It returns a channel where objects of type RemovedBlock are placed, when
|
||||
// not using the Quiet option. Block removal is asynchronous and will
|
||||
// skip any pinned blocks.
|
||||
func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids []cid.Cid, opts RmBlocksOpts) (<-chan interface{}, error) {
|
||||
func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids []cid.Cid, opts RmBlocksOpts) (<-chan any, error) {
|
||||
// make the channel large enough to hold any result to avoid
|
||||
// blocking while holding the GCLock
|
||||
out := make(chan interface{}, len(cids))
|
||||
out := make(chan any, len(cids))
|
||||
go func() {
|
||||
defer close(out)
|
||||
|
||||
@ -75,7 +75,7 @@ func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids
|
||||
// out channel, with an error which indicates that the Cid is pinned.
|
||||
// This function is used in RmBlocks to filter out any blocks which are not
|
||||
// to be removed (because they are pinned).
|
||||
func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- interface{}, cids []cid.Cid) []cid.Cid {
|
||||
func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- any, cids []cid.Cid) []cid.Cid {
|
||||
stillOkay := make([]cid.Cid, 0, len(cids))
|
||||
res, err := pins.CheckIfPinned(ctx, cids...)
|
||||
if err != nil {
|
||||
|
||||
@ -101,7 +101,7 @@ func (api *KeyAPI) List(ctx context.Context) ([]iface.Key, error) {
|
||||
var out struct {
|
||||
Keys []keyOutput
|
||||
}
|
||||
if err := api.core().Request("key/list").Exec(ctx, &out); err != nil {
|
||||
if err := api.core().Request("key/ls").Exec(ctx, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@ -18,10 +18,10 @@ type RequestBuilder interface {
|
||||
BodyBytes(body []byte) RequestBuilder
|
||||
Body(body io.Reader) RequestBuilder
|
||||
FileBody(body io.Reader) RequestBuilder
|
||||
Option(key string, value interface{}) RequestBuilder
|
||||
Option(key string, value any) RequestBuilder
|
||||
Header(name, value string) RequestBuilder
|
||||
Send(ctx context.Context) (*Response, error)
|
||||
Exec(ctx context.Context, res interface{}) error
|
||||
Exec(ctx context.Context, res any) error
|
||||
}
|
||||
|
||||
// encodedAbsolutePathVersion is the version from which the absolute path header in
|
||||
@ -83,7 +83,7 @@ func (r *requestBuilder) FileBody(body io.Reader) RequestBuilder {
|
||||
}
|
||||
|
||||
// Option sets the given option.
|
||||
func (r *requestBuilder) Option(key string, value interface{}) RequestBuilder {
|
||||
func (r *requestBuilder) Option(key string, value any) RequestBuilder {
|
||||
var s string
|
||||
switch v := value.(type) {
|
||||
case bool:
|
||||
@ -128,7 +128,7 @@ func (r *requestBuilder) Send(ctx context.Context) (*Response, error) {
|
||||
}
|
||||
|
||||
// Exec sends the request a request and decodes the response.
|
||||
func (r *requestBuilder) Exec(ctx context.Context, res interface{}) error {
|
||||
func (r *requestBuilder) Exec(ctx context.Context, res any) error {
|
||||
httpRes, err := r.Send(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@ -64,7 +64,7 @@ func (r *Response) Cancel() error {
|
||||
}
|
||||
|
||||
// Decode reads request body and decodes it as json.
|
||||
func (r *Response) decode(dec interface{}) error {
|
||||
func (r *Response) decode(dec any) error {
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
@ -181,8 +181,8 @@ Headers.
|
||||
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
|
||||
cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").WithDefault(true),
|
||||
cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."),
|
||||
cmds.BoolOption(enablePubSubKwd, "DEPRECATED"),
|
||||
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS over pubsub. Implicitly enables pubsub, overrides Ipns.UsePubsub config."),
|
||||
cmds.BoolOption(enablePubSubKwd, "DEPRECATED CLI flag. Use Pubsub.Enabled config instead."),
|
||||
cmds.BoolOption(enableIPNSPubSubKwd, "DEPRECATED CLI flag. Use Ipns.UsePubsub config instead."),
|
||||
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
|
||||
cmds.StringOption(agentVersionSuffix, "Optional suffix to the AgentVersion presented by `ipfs id` and exposed via libp2p identify protocol."),
|
||||
|
||||
@ -397,10 +397,14 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
|
||||
fmt.Printf("PeerID: %s\n", cfg.Identity.PeerID)
|
||||
|
||||
if !psSet {
|
||||
if psSet {
|
||||
log.Error("The --enable-pubsub-experiment flag is deprecated. Use Pubsub.Enabled config option instead.")
|
||||
} else {
|
||||
pubsub = cfg.Pubsub.Enabled.WithDefault(false)
|
||||
}
|
||||
if !ipnsPsSet {
|
||||
if ipnsPsSet {
|
||||
log.Error("The --enable-namesys-pubsub flag is deprecated. Use Ipns.UsePubsub config option instead.")
|
||||
} else {
|
||||
ipnsps = cfg.Ipns.UsePubsub.WithDefault(false)
|
||||
}
|
||||
|
||||
@ -515,7 +519,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
}
|
||||
//nolint:staticcheck // intentionally checking deprecated fields
|
||||
if !cfg.Reprovider.Interval.IsDefault() || !cfg.Reprovider.Strategy.IsDefault() {
|
||||
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
|
||||
log.Fatal("Deprecated configuration detected. Manually migrate 'Reprovider' fields to 'Provide': Reprovider.Strategy -> Provide.Strategy, Reprovider.Interval -> Provide.DHT.Interval. Remove 'Reprovider' from your config. Documentation: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide")
|
||||
}
|
||||
// Check for deprecated "flat" strategy (should have been migrated to "all")
|
||||
if cfg.Provide.Strategy.WithDefault("") == "flat" {
|
||||
@ -883,23 +887,38 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
|
||||
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
|
||||
}
|
||||
|
||||
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
|
||||
errc := make(chan error, len(listeners))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Start all servers and wait for them to be ready before writing api file.
|
||||
// This prevents race conditions where external tools (like systemd path units)
|
||||
// see the file and try to connect before servers can accept connections.
|
||||
if len(listeners) > 0 {
|
||||
// Only add an api file if the API is running.
|
||||
readyChannels := make([]chan struct{}, len(listeners))
|
||||
for i, lis := range listeners {
|
||||
readyChannels[i] = make(chan struct{})
|
||||
ready := readyChannels[i]
|
||||
wg.Go(func() {
|
||||
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all listeners to be ready or any to fail
|
||||
for _, ready := range readyChannels {
|
||||
select {
|
||||
case <-ready:
|
||||
// This listener is ready
|
||||
case err := <-errc:
|
||||
return nil, fmt.Errorf("serveHTTPApi: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := node.Repo.SetAPIAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr())); err != nil {
|
||||
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
var wg sync.WaitGroup
|
||||
for _, apiLis := range listeners {
|
||||
wg.Add(1)
|
||||
go func(lis manet.Listener) {
|
||||
defer wg.Done()
|
||||
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
|
||||
}(apiLis)
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
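The block above starts every API server, waits on a per-listener ready channel, and only then writes the api file, so external watchers (such as systemd path units) can never observe the file before the sockets accept connections. A minimal, self-contained sketch of the same pattern, using plain net/http in place of Kubo's corehttp.ServeWithReady (the serveWithReady helper below is hypothetical, not the real corehttp API):

package main

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"sync"
)

// serveWithReady signals readiness once the listener is accepting connections,
// then blocks serving HTTP until the listener is closed.
func serveWithReady(lis net.Listener, ready chan<- struct{}, h http.Handler) error {
	close(ready) // net.Listen already accepts; signal before blocking in Serve
	return http.Serve(lis, h)
}

func startAll(listeners []net.Listener, h http.Handler) (<-chan error, error) {
	// Buffered so a failing server can report its error even if nobody reads yet.
	errc := make(chan error, len(listeners))
	var wg sync.WaitGroup

	readyChannels := make([]chan struct{}, len(listeners))
	for i, lis := range listeners {
		readyChannels[i] = make(chan struct{})
		ready := readyChannels[i]
		wg.Add(1)
		go func() {
			defer wg.Done()
			errc <- serveWithReady(lis, ready, h)
		}()
	}

	// Wait until every listener is ready, or bail out on the first failure.
	for _, ready := range readyChannels {
		select {
		case <-ready:
		case err := <-errc:
			return nil, fmt.Errorf("startAll: %w", err)
		}
	}

	go func() {
		wg.Wait()
		close(errc)
	}()
	return errc, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	errc, err := startAll([]net.Listener{lis}, http.NotFoundHandler())
	if err != nil {
		panic(err)
	}
	// Only now is it safe to advertise the address (Kubo writes the api file here).
	fmt.Println("serving on", lis.Addr())
	if err := <-errc; err != nil && !errors.Is(err, http.ErrServerClosed) {
		fmt.Println("server exited:", err)
	}
}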
@ -1058,26 +1077,42 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
|
||||
return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err)
|
||||
}
|
||||
|
||||
// Buffer channel to prevent deadlock when multiple servers write errors simultaneously
|
||||
errc := make(chan error, len(listeners))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Start all servers and wait for them to be ready before writing gateway file.
|
||||
// This prevents race conditions where external tools (like systemd path units)
|
||||
// see the file and try to connect before servers can accept connections.
|
||||
if len(listeners) > 0 {
|
||||
readyChannels := make([]chan struct{}, len(listeners))
|
||||
for i, lis := range listeners {
|
||||
readyChannels[i] = make(chan struct{})
|
||||
ready := readyChannels[i]
|
||||
wg.Go(func() {
|
||||
errc <- corehttp.ServeWithReady(node, manet.NetListener(lis), ready, opts...)
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all listeners to be ready or any to fail
|
||||
for _, ready := range readyChannels {
|
||||
select {
|
||||
case <-ready:
|
||||
// This listener is ready
|
||||
case err := <-errc:
|
||||
return nil, fmt.Errorf("serveHTTPGateway: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
addr, err := manet.ToNetAddr(rewriteMaddrToUseLocalhostIfItsAny(listeners[0].Multiaddr()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("serveHTTPGateway: manet.ToIP() failed: %w", err)
|
||||
return nil, fmt.Errorf("serveHTTPGateway: manet.ToNetAddr() failed: %w", err)
|
||||
}
|
||||
if err := node.Repo.SetGatewayAddr(addr); err != nil {
|
||||
return nil, fmt.Errorf("serveHTTPGateway: SetGatewayAddr() failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
var wg sync.WaitGroup
|
||||
for _, lis := range listeners {
|
||||
wg.Add(1)
|
||||
go func(lis manet.Listener) {
|
||||
defer wg.Done()
|
||||
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
|
||||
}(lis)
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
@ -1252,7 +1287,7 @@ func merge(cs ...<-chan error) <-chan error {
|
||||
|
||||
func YesNoPrompt(prompt string) bool {
|
||||
var s string
|
||||
for i := 0; i < 3; i++ {
|
||||
for range 3 {
|
||||
fmt.Printf("%s ", prompt)
|
||||
_, err := fmt.Scanf("%s", &s)
|
||||
if err != nil {
|
||||
|
||||
@ -18,7 +18,7 @@ var (
|
||||
|
||||
func makeResolver(t *testing.T, n uint8) *madns.Resolver {
|
||||
results := make([]net.IPAddr, n)
|
||||
for i := uint8(0); i < n; i++ {
|
||||
for i := range n {
|
||||
results[i] = net.IPAddr{IP: net.ParseIP(fmt.Sprintf("192.0.2.%d", i))}
|
||||
}
|
||||
|
||||
|
||||
@ -133,7 +133,7 @@ func applyProfiles(conf *config.Config, profiles string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, profile := range strings.Split(profiles, ",") {
|
||||
for profile := range strings.SplitSeq(profiles, ",") {
|
||||
transformer, ok := config.Profiles[profile]
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid configuration profile: %s", profile)
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -33,6 +34,7 @@ import (
|
||||
"github.com/ipfs/kubo/repo"
|
||||
"github.com/ipfs/kubo/repo/fsrepo"
|
||||
"github.com/ipfs/kubo/tracing"
|
||||
"github.com/libp2p/go-libp2p/gologshim"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
madns "github.com/multiformats/go-multiaddr-dns"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
@ -50,6 +52,17 @@ var (
|
||||
tracer trace.Tracer
|
||||
)
|
||||
|
||||
func init() {
	// Set go-log's slog handler as the application-wide default.
	// This ensures all slog-based logging uses go-log's formatting.
	slog.SetDefault(slog.New(logging.SlogHandler()))

	// Wire go-log's slog bridge to go-libp2p's gologshim.
	// This provides go-libp2p loggers with the "logger" attribute
	// for per-subsystem level control (e.g., `ipfs log level libp2p-swarm debug`).
	gologshim.SetDefaultHandler(logging.SlogHandler())
}
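The init above makes go-log's handler the process-wide slog default and hands the same handler to go-libp2p's gologshim, so every subsystem logs through one formatter and one level-control path. A rough sketch of the same idea using only the standard library, with a plain text handler standing in for go-log's (the gologshim side is Kubo/go-libp2p specific and omitted):

package main

import (
	"log/slog"
	"os"
)

func main() {
	// Build a single handler that every logger in the process will share.
	handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo})

	// Make it the process-wide default: any package calling slog.Info/Debug/...
	// without constructing its own logger now goes through this handler.
	slog.SetDefault(slog.New(handler))

	// Per-subsystem loggers can still be derived with attributes, which is what
	// per-subsystem level control keys on (e.g. a "logger" attribute).
	swarmLog := slog.Default().With("logger", "libp2p-swarm")
	swarmLog.Info("peer connected", "peer", "12D3KooW...")
}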
|
||||
// declared as a var for testing purposes.
|
||||
var dnsResolver = madns.DefaultResolver
|
||||
|
||||
@ -238,7 +251,7 @@ func apiAddrOption(req *cmds.Request) (ma.Multiaddr, error) {
|
||||
// multipart requests is %-encoded. Before this version, it's sent raw.
|
||||
var encodedAbsolutePathVersion = semver.MustParse("0.23.0-dev")
|
||||
|
||||
func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) {
|
||||
func makeExecutor(req *cmds.Request, env any) (cmds.Executor, error) {
|
||||
exe := tracingWrappedExecutor{cmds.NewExecutor(req.Root)}
|
||||
cctx := env.(*oldcmds.Context)
|
||||
|
||||
|
||||
@ -37,9 +37,7 @@ func (ih *IntrHandler) Close() error {
|
||||
func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ...os.Signal) {
|
||||
notify := make(chan os.Signal, 1)
|
||||
signal.Notify(notify, sigs...)
|
||||
ih.wg.Add(1)
|
||||
go func() {
|
||||
defer ih.wg.Done()
|
||||
ih.wg.Go(func() {
|
||||
defer signal.Stop(notify)
|
||||
|
||||
count := 0
|
||||
@ -52,7 +50,7 @@ func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ...
|
||||
handler(count, ih)
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
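The change above replaces the manual wg.Add(1) / go func() { defer wg.Done() }() pair with sync.WaitGroup.Go, added in Go 1.25, which performs the same counter bookkeeping around the supplied function. A minimal before/after sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Old pattern: explicit Add/Done around the goroutine body.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("manual Add/Done")
	}()

	// New pattern (Go 1.25+): WaitGroup.Go increments the counter,
	// runs f in a new goroutine, and calls Done when f returns.
	wg.Go(func() {
		fmt.Println("wg.Go")
	})

	wg.Wait()
}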
|
||||
func SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
|
||||
|
||||
3
cmd/ipfswatch/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
ipfswatch
|
||||
ipfswatch-test-cover
|
||||
ipfswatch.exe
|
||||
@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"syscall"
|
||||
|
||||
commands "github.com/ipfs/kubo/commands"
|
||||
@ -17,6 +18,11 @@ import (
|
||||
coreapi "github.com/ipfs/kubo/core/coreapi"
|
||||
corehttp "github.com/ipfs/kubo/core/corehttp"
|
||||
"github.com/ipfs/kubo/misc/fsutil"
|
||||
"github.com/ipfs/kubo/plugin"
|
||||
pluginbadgerds "github.com/ipfs/kubo/plugin/plugins/badgerds"
|
||||
pluginflatfs "github.com/ipfs/kubo/plugin/plugins/flatfs"
|
||||
pluginlevelds "github.com/ipfs/kubo/plugin/plugins/levelds"
|
||||
pluginpebbleds "github.com/ipfs/kubo/plugin/plugins/pebbleds"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
|
||||
fsnotify "github.com/fsnotify/fsnotify"
|
||||
@ -60,6 +66,18 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func loadDatastorePlugins(plugins []plugin.Plugin) error {
|
||||
for _, pl := range plugins {
|
||||
if pl, ok := pl.(plugin.PluginDatastore); ok {
|
||||
err := fsrepo.AddDatastoreConfigHandler(pl.DatastoreTypeName(), pl.DatastoreConfigParser())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func run(ipfsPath, watchPath string) error {
|
||||
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
|
||||
|
||||
@ -77,6 +95,15 @@ func run(ipfsPath, watchPath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = loadDatastorePlugins(slices.Concat(
|
||||
pluginbadgerds.Plugins,
|
||||
pluginflatfs.Plugins,
|
||||
pluginlevelds.Plugins,
|
||||
pluginpebbleds.Plugins,
|
||||
)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := fsrepo.Open(ipfsPath)
|
||||
if err != nil {
|
||||
// TODO handle case: daemon running
|
||||
@ -123,6 +150,7 @@ func run(ipfsPath, watchPath string) error {
|
||||
log.Printf("received event: %s", e)
|
||||
isDir, err := IsDirectory(e.Name)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
switch e.Op {
|
||||
@ -193,7 +221,7 @@ func addTree(w *fsnotify.Watcher, root string) error {
|
||||
return filepath.SkipDir
|
||||
case isDir:
|
||||
log.Println(path)
|
||||
if err := w.Add(path); err != nil {
|
||||
if err = w.Add(path); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
@ -206,7 +234,10 @@ func addTree(w *fsnotify.Watcher, root string) error {
|
||||
|
||||
func IsDirectory(path string) (bool, error) {
|
||||
fileInfo, err := os.Stat(path)
|
||||
return fileInfo.IsDir(), err
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return fileInfo.IsDir(), nil
|
||||
}
|
||||
|
||||
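The IsDirectory fix above matters because os.Stat returns a nil FileInfo together with a non-nil error; the old single-line return evaluated fileInfo.IsDir() even in that case and would panic on a missing path. A tiny sketch of the safe order of operations (the path is illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	// On a missing path os.Stat returns (nil, error); the error must be checked
	// before touching the FileInfo, otherwise IsDir() panics on the nil value.
	fi, err := os.Stat("/path/that/does/not/exist")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println("is directory:", fi.IsDir())
}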
func IsHidden(path string) bool {
|
||||
|
||||
@ -11,7 +11,7 @@ type ReqLogEntry struct {
|
||||
EndTime time.Time
|
||||
Active bool
|
||||
Command string
|
||||
Options map[string]interface{}
|
||||
Options map[string]any
|
||||
Args []string
|
||||
ID int
|
||||
|
||||
|
||||
@ -2,7 +2,7 @@ package config
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"math/rand"
|
||||
"math/rand/v2"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/boxo/autoconf"
|
||||
@ -70,7 +70,7 @@ func selectRandomResolver(resolvers []string) string {
|
||||
if len(resolvers) == 0 {
|
||||
return ""
|
||||
}
|
||||
return resolvers[rand.Intn(len(resolvers))]
|
||||
return resolvers[rand.IntN(len(resolvers))]
|
||||
}
|
||||
|
||||
// DNSResolversWithAutoConf returns DNS resolvers with "auto" values replaced by autoconf values
|
||||
|
||||
@ -3,6 +3,7 @@ package config
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/ipfs/boxo/autoconf"
|
||||
@ -82,12 +83,9 @@ func validateAutoConfDisabled(cfg *Config) error {
|
||||
var errors []string
|
||||
|
||||
// Check Bootstrap
|
||||
for _, peer := range cfg.Bootstrap {
|
||||
if peer == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
if slices.Contains(cfg.Bootstrap, AutoPlaceholder) {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Bootstrap contains 'auto' but AutoConf.Enabled=false")
|
||||
}
|
||||
|
||||
// Check DNS.Resolvers
|
||||
@ -102,21 +100,15 @@ func validateAutoConfDisabled(cfg *Config) error {
|
||||
}
|
||||
|
||||
// Check Routing.DelegatedRouters
|
||||
for _, router := range cfg.Routing.DelegatedRouters {
|
||||
if router == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
if slices.Contains(cfg.Routing.DelegatedRouters, AutoPlaceholder) {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Routing.DelegatedRouters contains 'auto' but AutoConf.Enabled=false")
|
||||
}
|
||||
|
||||
// Check Ipns.DelegatedPublishers
|
||||
for _, publisher := range cfg.Ipns.DelegatedPublishers {
|
||||
if publisher == AutoPlaceholder {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
|
||||
break
|
||||
}
|
||||
if slices.Contains(cfg.Ipns.DelegatedPublishers, AutoPlaceholder) {
|
||||
hasAutoValues = true
|
||||
errors = append(errors, "Ipns.DelegatedPublishers contains 'auto' but AutoConf.Enabled=false")
|
||||
}
|
||||
|
||||
// Log all errors
|
||||
|
||||
@ -84,5 +84,5 @@ type AutoNATThrottleConfig struct {
|
||||
// global/peer dialback limits.
|
||||
//
|
||||
// When unset, this defaults to 1 minute.
|
||||
Interval OptionalDuration `json:",omitempty"`
|
||||
Interval OptionalDuration
|
||||
}
|
||||
|
||||
@ -16,6 +16,13 @@ type AutoTLS struct {
|
||||
// Optional, controls if Kubo should add /tls/sni/.../ws listener to every /tcp port if no explicit /ws is defined in Addresses.Swarm
|
||||
AutoWSS Flag `json:",omitempty"`
|
||||
|
||||
// Optional, controls whether to skip network DNS lookups for p2p-forge domains.
|
||||
// Applies to resolution via DNS.Resolvers, including /dns* multiaddrs in go-libp2p.
|
||||
// When enabled (default), A/AAAA queries for *.libp2p.direct are resolved
|
||||
// locally by parsing the IP directly from the hostname, avoiding network I/O.
|
||||
// Set to false to always use network DNS (useful for debugging).
|
||||
SkipDNSLookup Flag `json:",omitempty"`
|
||||
|
||||
// Optional override of the parent domain that will be used
|
||||
DomainSuffix *OptionalString `json:",omitempty"`
|
||||
|
||||
@ -42,5 +49,6 @@ const (
|
||||
DefaultCAEndpoint = p2pforge.DefaultCAEndpoint
|
||||
DefaultAutoWSS = true // requires AutoTLS.Enabled
|
||||
DefaultAutoTLSShortAddrs = true // requires AutoTLS.Enabled
|
||||
DefaultAutoTLSSkipDNSLookup = true // skip network DNS for p2p-forge domains
|
||||
DefaultAutoTLSRegistrationDelay = 1 * time.Hour
|
||||
)
|
||||
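SkipDNSLookup can avoid network I/O because p2p-forge hostnames already carry the address: the first label of a *.libp2p.direct name encodes the IP with dashes, so an A/AAAA answer can be synthesized locally. A hedged sketch of that parsing step; the exact label format assumed here is for illustration only and is not taken from this diff:

package main

import (
	"fmt"
	"net"
	"strings"
)

// parseForgeAddr extracts the IP assumed to be encoded in the first DNS label
// of a *.libp2p.direct style hostname, e.g. "192-0-2-7.<peerID>.libp2p.direct".
// Returns nil if the name does not match the expected shape.
func parseForgeAddr(host string) net.IP {
	if !strings.HasSuffix(host, ".libp2p.direct") {
		return nil
	}
	label, _, ok := strings.Cut(host, ".")
	if !ok {
		return nil
	}
	// Assumed encoding: IPv4 uses '-' in place of '.', IPv6 uses '-' in place of ':'.
	ip := net.ParseIP(strings.ReplaceAll(label, "-", "."))
	if ip == nil {
		ip = net.ParseIP(strings.ReplaceAll(label, "-", ":"))
	}
	return ip
}

func main() {
	fmt.Println(parseForgeAddr("192-0-2-7.k51qzi5uqu5example.libp2p.direct")) // 192.0.2.7
	fmt.Println(parseForgeAddr("example.com"))                               // <nil>
}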
|
||||
@ -47,7 +47,7 @@ type Config struct {
|
||||
|
||||
Internal Internal // experimental/unstable options
|
||||
|
||||
Bitswap Bitswap `json:",omitempty"`
|
||||
Bitswap Bitswap
|
||||
}
|
||||
|
||||
const (
|
||||
@ -106,7 +106,7 @@ func Filename(configroot, userConfigFile string) (string, error) {
|
||||
}
|
||||
|
||||
// HumanOutput gets a config value ready for printing.
|
||||
func HumanOutput(value interface{}) ([]byte, error) {
|
||||
func HumanOutput(value any) ([]byte, error) {
|
||||
s, ok := value.(string)
|
||||
if ok {
|
||||
return []byte(strings.Trim(s, "\n")), nil
|
||||
@ -115,12 +115,12 @@ func HumanOutput(value interface{}) ([]byte, error) {
|
||||
}
|
||||
|
||||
// Marshal configuration with JSON.
|
||||
func Marshal(value interface{}) ([]byte, error) {
|
||||
func Marshal(value any) ([]byte, error) {
|
||||
// need to prettyprint, hence MarshalIndent, instead of Encoder
|
||||
return json.MarshalIndent(value, "", " ")
|
||||
}
|
||||
|
||||
func FromMap(v map[string]interface{}) (*Config, error) {
|
||||
func FromMap(v map[string]any) (*Config, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(buf).Encode(v); err != nil {
|
||||
return nil, err
|
||||
@ -132,12 +132,12 @@ func FromMap(v map[string]interface{}) (*Config, error) {
|
||||
return &conf, nil
|
||||
}
|
||||
|
||||
func ToMap(conf *Config) (map[string]interface{}, error) {
|
||||
func ToMap(conf *Config) (map[string]any, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(buf).Encode(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
if err := json.NewDecoder(buf).Decode(&m); err != nil {
|
||||
return nil, fmt.Errorf("failure to decode config: %w", err)
|
||||
}
|
||||
@ -147,14 +147,14 @@ func ToMap(conf *Config) (map[string]interface{}, error) {
|
||||
// Convert config to a map, without using encoding/json, since
|
||||
// zero/empty/'omitempty' fields are excluded by encoding/json during
|
||||
// marshaling.
|
||||
func ReflectToMap(conf interface{}) interface{} {
|
||||
func ReflectToMap(conf any) any {
|
||||
v := reflect.ValueOf(conf)
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle pointer type
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if v.Kind() == reflect.Pointer {
|
||||
if v.IsNil() {
|
||||
// Create a zero value of the pointer's element type
|
||||
elemType := v.Type().Elem()
|
||||
@ -166,7 +166,7 @@ func ReflectToMap(conf interface{}) interface{} {
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
result := make(map[string]interface{})
|
||||
result := make(map[string]any)
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
@ -178,7 +178,7 @@ func ReflectToMap(conf interface{}) interface{} {
|
||||
return result
|
||||
|
||||
case reflect.Map:
|
||||
result := make(map[string]interface{})
|
||||
result := make(map[string]any)
|
||||
iter := v.MapRange()
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
@ -194,7 +194,7 @@ func ReflectToMap(conf interface{}) interface{} {
|
||||
return result
|
||||
|
||||
case reflect.Slice, reflect.Array:
|
||||
result := make([]interface{}, v.Len())
|
||||
result := make([]any, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
result[i] = ReflectToMap(v.Index(i).Interface())
|
||||
}
|
||||
@ -234,11 +234,11 @@ func CheckKey(key string) error {
|
||||
|
||||
// Parse the key and verify its presence in the map.
|
||||
var ok bool
|
||||
var mapCursor map[string]interface{}
|
||||
var mapCursor map[string]any
|
||||
|
||||
parts := strings.Split(key, ".")
|
||||
for i, part := range parts {
|
||||
mapCursor, ok = cursor.(map[string]interface{})
|
||||
mapCursor, ok = cursor.(map[string]any)
|
||||
if !ok {
|
||||
if cursor == nil {
|
||||
return nil
|
||||
|
||||
@ -32,7 +32,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
// Helper function to create a test config with various field types
|
||||
reflectedConfig := ReflectToMap(new(Config))
|
||||
|
||||
mapConfig, ok := reflectedConfig.(map[string]interface{})
|
||||
mapConfig, ok := reflectedConfig.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("Config didn't convert to map")
|
||||
}
|
||||
@ -42,7 +42,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
t.Fatal("Identity field not found")
|
||||
}
|
||||
|
||||
mapIdentity, ok := reflectedIdentity.(map[string]interface{})
|
||||
mapIdentity, ok := reflectedIdentity.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("Identity field didn't convert to map")
|
||||
}
|
||||
@ -70,7 +70,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("Bootstrap field not found in config")
|
||||
}
|
||||
bootstrap, ok := reflectedBootstrap.([]interface{})
|
||||
bootstrap, ok := reflectedBootstrap.([]any)
|
||||
if !ok {
|
||||
t.Fatal("Bootstrap field didn't convert to []string")
|
||||
}
|
||||
@ -82,7 +82,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("Datastore field not found in config")
|
||||
}
|
||||
datastore, ok := reflectedDatastore.(map[string]interface{})
|
||||
datastore, ok := reflectedDatastore.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("Datastore field didn't convert to map")
|
||||
}
|
||||
@ -107,7 +107,7 @@ func TestReflectToMap(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("DNS field not found in config")
|
||||
}
|
||||
DNS, ok := reflectedDNS.(map[string]interface{})
|
||||
DNS, ok := reflectedDNS.(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("DNS field didn't convert to map")
|
||||
}
|
||||
@ -116,12 +116,12 @@ func TestReflectToMap(t *testing.T) {
|
||||
t.Fatal("Resolvers field not found in DNS")
|
||||
}
|
||||
// Test map field
|
||||
if _, ok := reflectedResolvers.(map[string]interface{}); !ok {
|
||||
if _, ok := reflectedResolvers.(map[string]any); !ok {
|
||||
t.Fatal("Resolvers field didn't convert to map")
|
||||
}
|
||||
|
||||
// Test pointer field
|
||||
if _, ok := DNS["MaxCacheTTL"].(map[string]interface{}); !ok {
|
||||
if _, ok := DNS["MaxCacheTTL"].(map[string]any); !ok {
|
||||
// Since OptionalDuration's only field is private, we cannot test it
|
||||
t.Fatal("MaxCacheTTL field didn't convert to map")
|
||||
}
|
||||
|
||||
@ -32,12 +32,12 @@ type Datastore struct {
|
||||
NoSync bool `json:",omitempty"`
|
||||
Params *json.RawMessage `json:",omitempty"`
|
||||
|
||||
Spec map[string]interface{}
|
||||
Spec map[string]any
|
||||
|
||||
HashOnRead bool
|
||||
BloomFilterSize int
|
||||
BlockKeyCacheSize OptionalInteger `json:",omitempty"`
|
||||
WriteThrough Flag `json:",omitempty"`
|
||||
BlockKeyCacheSize OptionalInteger
|
||||
WriteThrough Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DataStorePath returns the default data store path given a configuration root
|
||||
|
||||
@ -8,12 +8,15 @@ const (
|
||||
DefaultInlineDNSLink = false
|
||||
DefaultDeserializedResponses = true
|
||||
DefaultDisableHTMLErrors = false
|
||||
DefaultExposeRoutingAPI = false
|
||||
DefaultExposeRoutingAPI = true
|
||||
DefaultDiagnosticServiceURL = "https://check.ipfs.network"
|
||||
DefaultAllowCodecConversion = false
|
||||
|
||||
// Gateway limit defaults from boxo
|
||||
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
|
||||
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
|
||||
DefaultRetrievalTimeout = gateway.DefaultRetrievalTimeout
|
||||
DefaultMaxRequestDuration = gateway.DefaultMaxRequestDuration
|
||||
DefaultMaxConcurrentRequests = gateway.DefaultMaxConcurrentRequests
|
||||
DefaultMaxRangeRequestFileSize = 0 // 0 means no limit
|
||||
)
|
||||
|
||||
type GatewaySpec struct {
|
||||
@ -71,6 +74,12 @@ type Gateway struct {
|
||||
// be overridden per FQDN in PublicGateways.
|
||||
DeserializedResponses Flag
|
||||
|
||||
// AllowCodecConversion enables automatic conversion between codecs when
|
||||
// the requested format differs from the block's native codec (e.g.,
|
||||
// converting dag-pb or dag-cbor to dag-json). When disabled, the gateway
|
||||
// returns 406 Not Acceptable for codec mismatches per IPIP-524.
|
||||
AllowCodecConversion Flag
|
||||
|
||||
// DisableHTMLErrors disables pretty HTML pages when an error occurs. Instead, a `text/plain`
|
||||
// page will be sent with the raw error message.
|
||||
DisableHTMLErrors Flag
|
||||
@ -95,11 +104,25 @@ type Gateway struct {
|
||||
// A value of 0 disables this timeout.
|
||||
RetrievalTimeout *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// MaxRequestDuration is an absolute deadline for the entire request.
|
||||
// Unlike RetrievalTimeout (which resets on each data write and catches
|
||||
// stalled transfers), this is a hard limit on the total time a request
|
||||
// can take. Returns 504 Gateway Timeout when exceeded.
|
||||
// This protects the gateway from edge cases and slow client attacks.
|
||||
// A value of 0 uses the default (1 hour).
|
||||
MaxRequestDuration *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// MaxConcurrentRequests limits concurrent HTTP requests handled by the gateway.
|
||||
// Requests beyond this limit receive 429 Too Many Requests with Retry-After header.
|
||||
// A value of 0 disables the limit.
|
||||
MaxConcurrentRequests *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// MaxRangeRequestFileSize limits the maximum file size for HTTP range requests.
|
||||
// Range requests for files larger than this limit return 501 Not Implemented.
|
||||
// This protects against CDN issues with large file range requests and prevents
|
||||
// excessive bandwidth consumption. A value of 0 disables the limit.
|
||||
MaxRangeRequestFileSize *OptionalBytes `json:",omitempty"`
|
||||
|
||||
// DiagnosticServiceURL is the URL for a service to diagnose CID retrievability issues.
|
||||
// When the gateway returns a 504 Gateway Timeout error, an "Inspect retrievability of CID"
|
||||
// button will be shown that links to this service with the CID appended as ?cid=<CID-to-diagnose>.
|
||||
|
||||
114
config/import.go
@ -2,11 +2,13 @@ package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
chunk "github.com/ipfs/boxo/chunker"
|
||||
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
|
||||
"github.com/ipfs/boxo/ipld/unixfs/io"
|
||||
uio "github.com/ipfs/boxo/ipld/unixfs/io"
|
||||
"github.com/ipfs/boxo/verifcid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
@ -16,8 +18,10 @@ const (
|
||||
DefaultUnixFSRawLeaves = false
|
||||
DefaultUnixFSChunker = "size-262144"
|
||||
DefaultHashFunction = "sha2-256"
|
||||
DefaultFastProvideRoot = true
|
||||
DefaultFastProvideWait = false
|
||||
|
||||
DefaultUnixFSHAMTDirectorySizeThreshold = "256KiB" // https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26
|
||||
DefaultUnixFSHAMTDirectorySizeThreshold = 262144 // 256KiB - https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26
|
||||
|
||||
// DefaultBatchMaxNodes controls the maximum number of nodes in a
|
||||
// write-batch. The total size of the batch is limited by
|
||||
@ -27,27 +31,44 @@ const (
|
||||
// write-batch. The total size of the batch is limited by
|
||||
// BatchMaxNodes and BatchMaxSize.
|
||||
DefaultBatchMaxSize = 100 << 20 // 100MiB
|
||||
|
||||
// HAMTSizeEstimation values for Import.UnixFSHAMTDirectorySizeEstimation
|
||||
HAMTSizeEstimationLinks = "links" // legacy: estimate using link names + CID byte lengths (default)
|
||||
HAMTSizeEstimationBlock = "block" // full serialized dag-pb block size
|
||||
HAMTSizeEstimationDisabled = "disabled" // disable HAMT sharding entirely
|
||||
|
||||
// DAGLayout values for Import.UnixFSDAGLayout
|
||||
DAGLayoutBalanced = "balanced" // balanced DAG layout (default)
|
||||
DAGLayoutTrickle = "trickle" // trickle DAG layout
|
||||
|
||||
DefaultUnixFSHAMTDirectorySizeEstimation = HAMTSizeEstimationLinks // legacy behavior
|
||||
DefaultUnixFSDAGLayout = DAGLayoutBalanced // balanced DAG layout
|
||||
DefaultUnixFSIncludeEmptyDirs = true // include empty directories
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultUnixFSFileMaxLinks = int64(helpers.DefaultLinksPerBlock)
|
||||
DefaultUnixFSDirectoryMaxLinks = int64(0)
|
||||
DefaultUnixFSHAMTDirectoryMaxFanout = int64(io.DefaultShardWidth)
|
||||
DefaultUnixFSHAMTDirectoryMaxFanout = int64(uio.DefaultShardWidth)
|
||||
)
|
||||
|
||||
// Import configures the default options for ingesting data. This affects commands
|
||||
// that ingest data, such as 'ipfs add', 'ipfs dag put', 'ipfs block put', 'ipfs files write'.
|
||||
type Import struct {
|
||||
CidVersion OptionalInteger
|
||||
UnixFSRawLeaves Flag
|
||||
UnixFSChunker OptionalString
|
||||
HashFunction OptionalString
|
||||
UnixFSFileMaxLinks OptionalInteger
|
||||
UnixFSDirectoryMaxLinks OptionalInteger
|
||||
UnixFSHAMTDirectoryMaxFanout OptionalInteger
|
||||
UnixFSHAMTDirectorySizeThreshold OptionalString
|
||||
BatchMaxNodes OptionalInteger
|
||||
BatchMaxSize OptionalInteger
|
||||
CidVersion OptionalInteger
|
||||
UnixFSRawLeaves Flag
|
||||
UnixFSChunker OptionalString
|
||||
HashFunction OptionalString
|
||||
UnixFSFileMaxLinks OptionalInteger
|
||||
UnixFSDirectoryMaxLinks OptionalInteger
|
||||
UnixFSHAMTDirectoryMaxFanout OptionalInteger
|
||||
UnixFSHAMTDirectorySizeThreshold OptionalBytes
|
||||
UnixFSHAMTDirectorySizeEstimation OptionalString // "links", "block", or "disabled"
|
||||
UnixFSDAGLayout OptionalString // "balanced" or "trickle"
|
||||
BatchMaxNodes OptionalInteger
|
||||
BatchMaxSize OptionalInteger
|
||||
FastProvideRoot Flag
|
||||
FastProvideWait Flag
|
||||
}
|
||||
|
||||
// ValidateImportConfig validates the Import configuration according to UnixFS spec requirements.
|
||||
@ -125,6 +146,30 @@ func ValidateImportConfig(cfg *Import) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Validate UnixFSHAMTDirectorySizeEstimation
|
||||
if !cfg.UnixFSHAMTDirectorySizeEstimation.IsDefault() {
|
||||
est := cfg.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation)
|
||||
switch est {
|
||||
case HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled:
|
||||
// valid
|
||||
default:
|
||||
return fmt.Errorf("Import.UnixFSHAMTDirectorySizeEstimation must be %q, %q, or %q, got %q",
|
||||
HAMTSizeEstimationLinks, HAMTSizeEstimationBlock, HAMTSizeEstimationDisabled, est)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate UnixFSDAGLayout
|
||||
if !cfg.UnixFSDAGLayout.IsDefault() {
|
||||
layout := cfg.UnixFSDAGLayout.WithDefault(DefaultUnixFSDAGLayout)
|
||||
switch layout {
|
||||
case DAGLayoutBalanced, DAGLayoutTrickle:
|
||||
// valid
|
||||
default:
|
||||
return fmt.Errorf("Import.UnixFSDAGLayout must be %q or %q, got %q",
|
||||
DAGLayoutBalanced, DAGLayoutTrickle, layout)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
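The two validation switches above reject anything other than the documented estimation modes and DAG layouts. A short usage sketch mirroring the table-driven tests later in this diff, assuming this branch's github.com/ipfs/kubo/config package:

package main

import (
	"fmt"

	config "github.com/ipfs/kubo/config"
)

func main() {
	// Build an Import section the same way the tests do and run the validator;
	// unknown values such as "flat" or "link" would be rejected with an error.
	imp := &config.Import{
		UnixFSDAGLayout:                   *config.NewOptionalString(config.DAGLayoutTrickle),
		UnixFSHAMTDirectorySizeEstimation: *config.NewOptionalString(config.HAMTSizeEstimationBlock),
	}
	if err := config.ValidateImportConfig(imp); err != nil {
		fmt.Println("invalid Import config:", err)
		return
	}
	fmt.Println("Import config OK")
}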
@ -140,8 +185,7 @@ func isValidChunker(chunker string) bool {
|
||||
}
|
||||
|
||||
// Check for size-<bytes> format
|
||||
if strings.HasPrefix(chunker, "size-") {
|
||||
sizeStr := strings.TrimPrefix(chunker, "size-")
|
||||
if sizeStr, ok := strings.CutPrefix(chunker, "size-"); ok {
|
||||
if sizeStr == "" {
|
||||
return false
|
||||
}
|
||||
@ -163,7 +207,7 @@ func isValidChunker(chunker string) bool {
|
||||
|
||||
// Parse and validate min, avg, max values
|
||||
values := make([]int, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
val, err := strconv.Atoi(parts[i+1])
|
||||
if err != nil {
|
||||
return false
|
||||
@ -178,3 +222,41 @@ func isValidChunker(chunker string) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// HAMTSizeEstimationMode returns the boxo SizeEstimationMode based on the config value.
|
||||
func (i *Import) HAMTSizeEstimationMode() uio.SizeEstimationMode {
|
||||
switch i.UnixFSHAMTDirectorySizeEstimation.WithDefault(DefaultUnixFSHAMTDirectorySizeEstimation) {
|
||||
case HAMTSizeEstimationLinks:
|
||||
return uio.SizeEstimationLinks
|
||||
case HAMTSizeEstimationBlock:
|
||||
return uio.SizeEstimationBlock
|
||||
case HAMTSizeEstimationDisabled:
|
||||
return uio.SizeEstimationDisabled
|
||||
default:
|
||||
return uio.SizeEstimationLinks
|
||||
}
|
||||
}
|
||||
|
||||
// UnixFSSplitterFunc returns a SplitterGen function based on Import.UnixFSChunker.
|
||||
// The returned function creates a Splitter for the configured chunking strategy.
|
||||
// The chunker string is parsed once when this method is called, not on each use.
|
||||
func (i *Import) UnixFSSplitterFunc() chunk.SplitterGen {
|
||||
chunkerStr := i.UnixFSChunker.WithDefault(DefaultUnixFSChunker)
|
||||
|
||||
// Parse size-based chunker (most common case) and return optimized generator
|
||||
if sizeStr, ok := strings.CutPrefix(chunkerStr, "size-"); ok {
|
||||
if size, err := strconv.ParseInt(sizeStr, 10, 64); err == nil && size > 0 {
|
||||
return chunk.SizeSplitterGen(size)
|
||||
}
|
||||
}
|
||||
|
||||
// For other chunker types (rabin, buzhash) or invalid config,
|
||||
// fall back to parsing per-use (these are rare cases)
|
||||
return func(r io.Reader) chunk.Splitter {
|
||||
s, err := chunk.FromString(r, chunkerStr)
|
||||
if err != nil {
|
||||
return chunk.DefaultSplitter(r)
|
||||
}
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
||||
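UnixFSSplitterFunc resolves the chunker string once and hands back a generator, so the common size-based case skips re-parsing on every file. A small usage sketch, assuming this branch's config package and the boxo chunker's NextBytes/io.EOF contract:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	config "github.com/ipfs/kubo/config"
)

func main() {
	// Resolve the configured chunker once, then reuse the generator
	// for every file that gets imported.
	imp := config.Import{UnixFSChunker: *config.NewOptionalString("size-4")}
	newSplitter := imp.UnixFSSplitterFunc()

	spl := newSplitter(bytes.NewReader([]byte("hello world")))
	for {
		chunk, err := spl.NextBytes()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", chunk) // "hell", "o wo", "rld"
	}
}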
@ -4,6 +4,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/boxo/ipld/unixfs/io"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
@ -406,3 +407,104 @@ func TestIsPowerOfTwo(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_HAMTSizeEstimation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
value string
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid links", value: HAMTSizeEstimationLinks, wantErr: false},
|
||||
{name: "valid block", value: HAMTSizeEstimationBlock, wantErr: false},
|
||||
{name: "valid disabled", value: HAMTSizeEstimationDisabled, wantErr: false},
|
||||
{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
|
||||
{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
|
||||
{name: "invalid typo", value: "link", wantErr: true, errMsg: "must be"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
UnixFSHAMTDirectorySizeEstimation: *NewOptionalString(tt.value),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("expected error for value=%q, got nil", tt.value)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for value=%q: %v", tt.value, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateImportConfig_DAGLayout(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
value string
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{name: "valid balanced", value: DAGLayoutBalanced, wantErr: false},
|
||||
{name: "valid trickle", value: DAGLayoutTrickle, wantErr: false},
|
||||
{name: "invalid unknown", value: "unknown", wantErr: true, errMsg: "must be"},
|
||||
{name: "invalid empty", value: "", wantErr: true, errMsg: "must be"},
|
||||
{name: "invalid flat", value: "flat", wantErr: true, errMsg: "must be"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Import{
|
||||
UnixFSDAGLayout: *NewOptionalString(tt.value),
|
||||
}
|
||||
|
||||
err := ValidateImportConfig(cfg)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Errorf("expected error for value=%q, got nil", tt.value)
|
||||
} else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("error = %v, want error containing %q", err, tt.errMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for value=%q: %v", tt.value, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImport_HAMTSizeEstimationMode(t *testing.T) {
|
||||
tests := []struct {
|
||||
cfg string
|
||||
want io.SizeEstimationMode
|
||||
}{
|
||||
{HAMTSizeEstimationLinks, io.SizeEstimationLinks},
|
||||
{HAMTSizeEstimationBlock, io.SizeEstimationBlock},
|
||||
{HAMTSizeEstimationDisabled, io.SizeEstimationDisabled},
|
||||
{"", io.SizeEstimationLinks}, // default (unset returns default)
|
||||
{"unknown", io.SizeEstimationLinks}, // fallback to default
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.cfg, func(t *testing.T) {
|
||||
var imp Import
|
||||
if tt.cfg != "" {
|
||||
imp.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(tt.cfg)
|
||||
}
|
||||
got := imp.HAMTSizeEstimationMode()
|
||||
if got != tt.want {
|
||||
t.Errorf("Import.HAMTSizeEstimationMode() with %q = %v, want %v", tt.cfg, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -130,8 +130,8 @@ func DefaultDatastoreConfig() Datastore {
|
||||
}
|
||||
}
|
||||
|
||||
func pebbleSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func pebbleSpec() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "pebbleds",
|
||||
"prefix": "pebble.datastore",
|
||||
"path": "pebbleds",
|
||||
@ -139,11 +139,11 @@ func pebbleSpec() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func pebbleSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func pebbleSpecMeasure() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "measure",
|
||||
"prefix": "pebble.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"formatMajorVersion": int(pebble.FormatNewest),
|
||||
"type": "pebbleds",
|
||||
"path": "pebbleds",
|
||||
@ -151,8 +151,8 @@ func pebbleSpecMeasure() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func badgerSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func badgerSpec() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "badgerds",
|
||||
"prefix": "badger.datastore",
|
||||
"path": "badgerds",
|
||||
@ -161,11 +161,11 @@ func badgerSpec() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func badgerSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func badgerSpecMeasure() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "measure",
|
||||
"prefix": "badger.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"type": "badgerds",
|
||||
"path": "badgerds",
|
||||
"syncWrites": false,
|
||||
@ -174,11 +174,11 @@ func badgerSpecMeasure() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func flatfsSpec() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func flatfsSpec() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "mount",
|
||||
"mounts": []interface{}{
|
||||
map[string]interface{}{
|
||||
"mounts": []any{
|
||||
map[string]any{
|
||||
"mountpoint": "/blocks",
|
||||
"type": "flatfs",
|
||||
"prefix": "flatfs.datastore",
|
||||
@ -186,7 +186,7 @@ func flatfsSpec() map[string]interface{} {
|
||||
"sync": false,
|
||||
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
|
||||
},
|
||||
map[string]interface{}{
|
||||
map[string]any{
|
||||
"mountpoint": "/",
|
||||
"type": "levelds",
|
||||
"prefix": "leveldb.datastore",
|
||||
@ -197,26 +197,26 @@ func flatfsSpec() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func flatfsSpecMeasure() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
func flatfsSpecMeasure() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "mount",
|
||||
"mounts": []interface{}{
|
||||
map[string]interface{}{
|
||||
"mounts": []any{
|
||||
map[string]any{
|
||||
"mountpoint": "/blocks",
|
||||
"type": "measure",
|
||||
"prefix": "flatfs.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"type": "flatfs",
|
||||
"path": "blocks",
|
||||
"sync": false,
|
||||
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
|
||||
},
|
||||
},
|
||||
map[string]interface{}{
|
||||
map[string]any{
|
||||
"mountpoint": "/",
|
||||
"type": "measure",
|
||||
"prefix": "leveldb.datastore",
|
||||
"child": map[string]interface{}{
|
||||
"child": map[string]any{
|
||||
"type": "levelds",
|
||||
"path": "datastore",
|
||||
"compression": "none",
|
||||
|
||||
@ -41,7 +41,7 @@ type BitswapBroadcastControl struct {
|
||||
// MaxPeers sets a hard limit on the number of peers to send broadcasts to.
|
||||
// A value of 0 means no broadcasts are sent. A value of -1 means there is
|
||||
// no limit. Default is [DefaultBroadcastControlMaxPeers].
|
||||
MaxPeers OptionalInteger `json:",omitempty"`
|
||||
MaxPeers OptionalInteger
|
||||
// LocalPeers enables or disables broadcast control for peers on the local
|
||||
// network. If false, then always broadcast to peers on the local network.
|
||||
// If true, apply broadcast control to local peers. Default is
|
||||
@ -58,7 +58,7 @@ type BitswapBroadcastControl struct {
|
||||
// this number of random peers receives a broadcast. This may be helpful in
|
||||
// cases where peers that are not receiving broadcasts may have wanted
|
||||
// blocks. Default is [DefaultBroadcastControlMaxRandomPeers].
|
||||
MaxRandomPeers OptionalInteger `json:",omitempty"`
|
||||
MaxRandomPeers OptionalInteger
|
||||
// SendToPendingPeers enables or disables sending broadcasts to any peers
|
||||
// to which there is a pending message to send. When enabled, this sends
|
||||
// broadcasts to many more peers, but does so in a way that does not
|
||||
|
||||
@ -7,5 +7,5 @@ type Plugins struct {
|
||||
|
||||
type Plugin struct {
|
||||
Disabled bool
|
||||
Config interface{} `json:",omitempty"`
|
||||
Config any `json:",omitempty"`
|
||||
}
|
||||
|
||||
@ -210,7 +210,9 @@ NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
},
|
||||
},
|
||||
"badgerds": {
|
||||
Description: `Configures the node to use the legacy badgerv1 datastore.
|
||||
Description: `DEPRECATED: Configures the node to use the legacy badgerv1 datastore.
|
||||
This profile will be removed in a future Kubo release.
|
||||
New deployments should use 'flatfs' or 'pebbleds' instead.
|
||||
|
||||
NOTE: this is badger 1.x, which has known bugs and is no longer supported by the upstream team.
|
||||
It is provided here only for pre-existing users, allowing them to migrate away to a more modern datastore.
|
||||
@ -225,6 +227,14 @@ Other caveats:
|
||||
* Good for medium-size datastores, but may run into performance issues
|
||||
if your dataset is bigger than a terabyte.
|
||||
|
||||
To migrate: create a new IPFS_PATH with 'ipfs init --profile=flatfs',
|
||||
move pinned data via 'ipfs dag export/import' or 'ipfs pin ls -t recursive|add',
|
||||
and decommission the old badger-based node.
|
||||
When it comes to block storage, use experimental 'pebbleds' only if you are sure
|
||||
modern 'flatfs' does not serve your use case (most users will be perfectly fine
|
||||
with flatfs; it is also possible to keep flatfs for blocks and replace leveldb
|
||||
with pebble if preferred over leveldb).
|
||||
|
||||
See configuration documentation at:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/datastores.md#badgerds
|
||||
|
||||
@ -239,8 +249,9 @@ NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
},
|
||||
},
|
||||
"badgerds-measure": {
|
||||
Description: `Configures the node to use the legacy badgerv1 datastore with metrics wrapper.
|
||||
Additional '*_datastore_*' metrics will be exposed on /debug/metrics/prometheus
|
||||
Description: `DEPRECATED: Configures the node to use the legacy badgerv1 datastore with metrics wrapper.
|
||||
This profile will be removed in a future Kubo release.
|
||||
New deployments should use 'flatfs' or 'pebbleds' instead.
|
||||
|
||||
NOTE: This profile may only be applied when first initializing node at IPFS_PATH
|
||||
via 'ipfs init --profile badgerds-measure'
|
||||
@ -312,45 +323,33 @@ fetching may be degraded.
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"unixfs-v0-2015": {
|
||||
Description: `Legacy UnixFS import profile for backward-compatible CID generation.
|
||||
Produces CIDv0 with no raw leaves, sha2-256, 256 KiB chunks, and
|
||||
link-based HAMT size estimation. Use only when legacy CIDs are required.
|
||||
See https://github.com/ipfs/specs/pull/499. Alias: legacy-cid-v0`,
|
||||
Transform: applyUnixFSv02015,
|
||||
},
|
||||
"legacy-cid-v0": {
|
||||
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. This is likely the least optimal preset, use only if legacy behavior is required.`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSRawLeaves = False
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-262144")
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("256KiB")
|
||||
return nil
|
||||
},
|
||||
Description: `Alias for unixfs-v0-2015 profile.`,
|
||||
Transform: applyUnixFSv02015,
|
||||
},
|
||||
"test-cid-v1": {
|
||||
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`,
|
||||
"unixfs-v1-2025": {
|
||||
Description: `Recommended UnixFS import profile for cross-implementation CID determinism.
|
||||
Uses CIDv1, raw leaves, sha2-256, 1 MiB chunks, 1024 links per file node,
|
||||
256 HAMT fanout, and block-based size estimation for HAMT threshold.
|
||||
See https://github.com/ipfs/specs/pull/499`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(1)
|
||||
c.Import.UnixFSRawLeaves = True
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-1048576")
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("256KiB")
|
||||
return nil
|
||||
},
|
||||
},
|
||||
"test-cid-v1-wide": {
|
||||
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per every node type, switch dir to HAMT above 1MiB).`,
|
||||
Transform: func(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(1)
|
||||
c.Import.UnixFSRawLeaves = True
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1 MiB
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("1MiB") // 1MiB
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
|
||||
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationBlock)
|
||||
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@ -435,3 +434,18 @@ func mapKeys(m map[string]struct{}) []string {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// applyUnixFSv02015 applies the legacy UnixFS v0 (2015) import settings.
|
||||
func applyUnixFSv02015(c *Config) error {
|
||||
c.Import.CidVersion = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSRawLeaves = False
|
||||
c.Import.UnixFSChunker = *NewOptionalString("size-262144") // 256 KiB
|
||||
c.Import.HashFunction = *NewOptionalString("sha2-256")
|
||||
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
|
||||
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
|
||||
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
|
||||
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalBytes("256KiB")
|
||||
c.Import.UnixFSHAMTDirectorySizeEstimation = *NewOptionalString(HAMTSizeEstimationLinks)
|
||||
c.Import.UnixFSDAGLayout = *NewOptionalString(DAGLayoutBalanced)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -15,12 +15,18 @@ const (
|
||||
// DHT provider defaults
|
||||
DefaultProvideDHTInterval = 22 * time.Hour // https://github.com/ipfs/kubo/pull/9326
|
||||
DefaultProvideDHTMaxWorkers = 16 // Unified default for both sweep and legacy providers
|
||||
DefaultProvideDHTSweepEnabled = false
|
||||
DefaultProvideDHTSweepEnabled = true
|
||||
DefaultProvideDHTResumeEnabled = true
|
||||
DefaultProvideDHTDedicatedPeriodicWorkers = 2
|
||||
DefaultProvideDHTDedicatedBurstWorkers = 1
|
||||
DefaultProvideDHTMaxProvideConnsPerWorker = 16
|
||||
DefaultProvideDHTMaxProvideConnsPerWorker = 20
|
||||
DefaultProvideDHTKeystoreBatchSize = 1 << 14 // ~544 KiB per batch (1 multihash = 34 bytes)
|
||||
DefaultProvideDHTOfflineDelay = 2 * time.Hour
|
||||
|
||||
// DefaultFastProvideTimeout is the maximum time allowed for fast-provide operations.
|
||||
// Prevents hanging on network issues when providing root CID.
|
||||
// 10 seconds is sufficient for DHT operations with sweep provider or accelerated client.
|
||||
DefaultFastProvideTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type ProvideStrategy int
|
||||
@ -63,7 +69,7 @@ type ProvideDHT struct {
|
||||
MaxWorkers *OptionalInteger `json:",omitempty"`
|
||||
|
||||
// SweepEnabled activates the sweeping reprovider system which spreads
|
||||
// reprovide operations over time. This will become the default in a future release.
|
||||
// reprovide operations over time.
|
||||
// Default: DefaultProvideDHTSweepEnabled
|
||||
SweepEnabled Flag `json:",omitempty"`
|
||||
|
||||
@ -86,11 +92,17 @@ type ProvideDHT struct {
|
||||
// OfflineDelay sets the delay after which the provider switches from Disconnected to Offline state (sweep mode only).
|
||||
// Default: DefaultProvideDHTOfflineDelay
|
||||
OfflineDelay *OptionalDuration `json:",omitempty"`
|
||||
|
||||
// ResumeEnabled controls whether the provider resumes from its previous state on restart.
|
||||
// When enabled, the provider persists its reprovide cycle state and provide queue to the datastore,
|
||||
// and restores them on restart. When disabled, the provider starts fresh on each restart.
|
||||
// Default: true
|
||||
ResumeEnabled Flag `json:",omitempty"`
|
||||
}
|
||||
|
||||
func ParseProvideStrategy(s string) ProvideStrategy {
|
||||
var strategy ProvideStrategy
|
||||
for _, part := range strings.Split(s, "+") {
|
||||
for part := range strings.SplitSeq(s, "+") {
|
||||
switch part {
|
||||
case "all", "flat", "": // special case, does not mix with others ("flat" is deprecated, maps to "all")
|
||||
return ProvideStrategyAll
|
||||
@ -168,3 +180,25 @@ func ValidateProvideConfig(cfg *Provide) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShouldProvideForStrategy determines if content should be provided based on the provide strategy
|
||||
// and content characteristics (pinned status, root status, MFS status).
|
||||
func ShouldProvideForStrategy(strategy ProvideStrategy, isPinned bool, isPinnedRoot bool, isMFS bool) bool {
|
||||
if strategy == ProvideStrategyAll {
|
||||
// 'all' strategy: always provide
|
||||
return true
|
||||
}
|
||||
|
||||
// For combined strategies, check each component
|
||||
if strategy&ProvideStrategyPinned != 0 && isPinned {
|
||||
return true
|
||||
}
|
||||
if strategy&ProvideStrategyRoots != 0 && isPinnedRoot {
|
||||
return true
|
||||
}
|
||||
if strategy&ProvideStrategyMFS != 0 && isMFS {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
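ShouldProvideForStrategy treats the strategy as a bitset and provides when any selected component matches, which is the behaviour the tests below lock in for combinations like pinned+mfs. A short usage sketch, assuming ParseProvideStrategy accepts the component names used elsewhere in the config (e.g. "pinned+mfs"):

package main

import (
	"fmt"

	config "github.com/ipfs/kubo/config"
)

func main() {
	strategy := config.ParseProvideStrategy("pinned+mfs")

	// A block that is only referenced from MFS: provided, because either
	// component of the OR'ed strategy is enough.
	fmt.Println(config.ShouldProvideForStrategy(strategy, false, false, true)) // true

	// A block that is neither pinned nor in MFS: not provided.
	fmt.Println(config.ShouldProvideForStrategy(strategy, false, false, false)) // false
}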
|
||||
@ -105,3 +105,87 @@ func TestValidateProvideConfig_MaxWorkers(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldProvideForStrategy(t *testing.T) {
|
||||
t.Run("all strategy always provides", func(t *testing.T) {
|
||||
// ProvideStrategyAll should return true regardless of flags
|
||||
testCases := []struct{ pinned, pinnedRoot, mfs bool }{
|
||||
{false, false, false},
|
||||
{true, true, true},
|
||||
{true, false, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
assert.True(t, ShouldProvideForStrategy(
|
||||
ProvideStrategyAll, tc.pinned, tc.pinnedRoot, tc.mfs))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("single strategies match only their flag", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
strategy ProvideStrategy
|
||||
pinned, pinnedRoot, mfs bool
|
||||
want bool
|
||||
}{
|
||||
{"pinned: matches when pinned=true", ProvideStrategyPinned, true, false, false, true},
|
||||
{"pinned: ignores other flags", ProvideStrategyPinned, false, true, true, false},
|
||||
|
||||
{"roots: matches when pinnedRoot=true", ProvideStrategyRoots, false, true, false, true},
|
||||
{"roots: ignores other flags", ProvideStrategyRoots, true, false, true, false},
|
||||
|
||||
{"mfs: matches when mfs=true", ProvideStrategyMFS, false, false, true, true},
|
||||
{"mfs: ignores other flags", ProvideStrategyMFS, true, true, false, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("combined strategies use OR logic (else-if bug fix)", func(t *testing.T) {
|
||||
// CRITICAL: Tests the fix where bitflag combinations (pinned+mfs) didn't work
|
||||
// because of else-if instead of separate if statements
|
||||
tests := []struct {
|
||||
name string
|
||||
strategy ProvideStrategy
|
||||
pinned, pinnedRoot, mfs bool
|
||||
want bool
|
||||
}{
|
||||
// pinned|mfs: provide if EITHER matches
|
||||
{"pinned|mfs when pinned", ProvideStrategyPinned | ProvideStrategyMFS, true, false, false, true},
|
||||
{"pinned|mfs when mfs", ProvideStrategyPinned | ProvideStrategyMFS, false, false, true, true},
|
||||
{"pinned|mfs when both", ProvideStrategyPinned | ProvideStrategyMFS, true, false, true, true},
|
||||
{"pinned|mfs when neither", ProvideStrategyPinned | ProvideStrategyMFS, false, false, false, false},
|
||||
|
||||
// roots|mfs
|
||||
{"roots|mfs when root", ProvideStrategyRoots | ProvideStrategyMFS, false, true, false, true},
|
||||
{"roots|mfs when mfs", ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true},
|
||||
{"roots|mfs when neither", ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false},
|
||||
|
||||
// pinned|roots
|
||||
{"pinned|roots when pinned", ProvideStrategyPinned | ProvideStrategyRoots, true, false, false, true},
|
||||
{"pinned|roots when root", ProvideStrategyPinned | ProvideStrategyRoots, false, true, false, true},
|
||||
{"pinned|roots when neither", ProvideStrategyPinned | ProvideStrategyRoots, false, false, false, false},
|
||||
|
||||
// triple combination
|
||||
{"all-three when any matches", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, true, true},
|
||||
{"all-three when none match", ProvideStrategyPinned | ProvideStrategyRoots | ProvideStrategyMFS, false, false, false, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ShouldProvideForStrategy(tt.strategy, tt.pinned, tt.pinnedRoot, tt.mfs)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zero strategy never provides", func(t *testing.T) {
|
||||
assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), false, false, false))
|
||||
assert.False(t, ShouldProvideForStrategy(ProvideStrategy(0), true, true, true))
|
||||
})
|
||||
}
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -59,7 +60,7 @@ type Router struct {
|
||||
|
||||
// Parameters are extra configuration that this router might need.
|
||||
// A common one for HTTP router is "Endpoint".
|
||||
Parameters interface{}
|
||||
Parameters any
|
||||
}
|
||||
|
||||
type (
|
||||
@ -78,13 +79,7 @@ func (m Methods) Check() error {
|
||||
|
||||
// Check unsupported methods
|
||||
for k := range m {
|
||||
seen := false
|
||||
for _, mn := range MethodNameList {
|
||||
if mn == k {
|
||||
seen = true
|
||||
break
|
||||
}
|
||||
}
|
||||
seen := slices.Contains(MethodNameList, k)
|
||||
|
||||
if seen {
|
||||
continue
|
||||
@ -108,7 +103,7 @@ func (r *RouterParser) UnmarshalJSON(b []byte) error {
|
||||
}
|
||||
raw := out.Parameters.(*json.RawMessage)
|
||||
|
||||
var p interface{}
|
||||
var p any
|
||||
switch out.Type {
|
||||
case RouterTypeHTTP:
|
||||
p = &HTTPRouterParams{}

@@ -18,7 +18,7 @@ import (
var ErrNotInitialized = errors.New("ipfs not initialized, please run 'ipfs init'")

// ReadConfigFile reads the config from `filename` into `cfg`.
func ReadConfigFile(filename string, cfg interface{}) error {
func ReadConfigFile(filename string, cfg any) error {
f, err := os.Open(filename)
if err != nil {
if os.IsNotExist(err) {

@@ -34,7 +34,7 @@ func ReadConfigFile(filename string, cfg interface{}) error {
}

// WriteConfigFile writes the config from `cfg` into `filename`.
func WriteConfigFile(filename string, cfg interface{}) error {
func WriteConfigFile(filename string, cfg any) error {
err := os.MkdirAll(filepath.Dir(filename), 0o755)
if err != nil {
return err

@@ -50,7 +50,7 @@ func WriteConfigFile(filename string, cfg interface{}) error {
}

// encode configuration with JSON.
func encode(w io.Writer, value interface{}) error {
func encode(w io.Writer, value any) error {
// need to prettyprint, hence MarshalIndent, instead of Encoder
buf, err := config.Marshal(value)
if err != nil {

@@ -118,7 +118,7 @@ type ResourceMgr struct {
Enabled Flag `json:",omitempty"`
Limits swarmLimits `json:",omitempty"`

MaxMemory *OptionalString `json:",omitempty"`
MaxMemory *OptionalBytes `json:",omitempty"`
MaxFileDescriptors *OptionalInteger `json:",omitempty"`

// A list of multiaddrs that can bypass normal system limits (but are still

@@ -7,6 +7,8 @@ import (
"io"
"strings"
"time"

humanize "github.com/dustin/go-humanize"
)

// Strings is a helper type that (un)marshals a single string to/from a single

@@ -115,6 +117,16 @@ func (f Flag) String() string {
}
}

// ResolveBoolFromConfig returns the resolved boolean value based on:
// - If userSet is true, returns userValue (user explicitly set the flag)
// - Otherwise, uses configFlag.WithDefault(defaultValue) (respects config or falls back to default)
func ResolveBoolFromConfig(userValue bool, userSet bool, configFlag Flag, defaultValue bool) bool {
if userSet {
return userValue
}
return configFlag.WithDefault(defaultValue)
}
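The precedence is: an explicitly set CLI value wins, otherwise the config Flag decides, otherwise the compiled-in default applies. A small hedged sketch of calling it (the zero value of Flag is assumed to mean "unset, use default" here):

package main

import (
	"fmt"

	"github.com/ipfs/kubo/config"
)

func main() {
	var unset config.Flag // assumption: zero value means "not set in config"
	// Nothing set anywhere: the compiled-in default (true) is used.
	fmt.Println(config.ResolveBoolFromConfig(false, false, unset, true)) // true
	// User explicitly passed the flag as false on the CLI: the user value wins.
	fmt.Println(config.ResolveBoolFromConfig(false, true, unset, true)) // false
}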

var (
_ json.Unmarshaler = (*Flag)(nil)
_ json.Marshaler = (*Flag)(nil)

@@ -286,7 +298,7 @@ func (d Duration) MarshalJSON() ([]byte, error) {
}

func (d *Duration) UnmarshalJSON(b []byte) error {
var v interface{}
var v any
if err := json.Unmarshal(b, &v); err != nil {
return err
}

@@ -425,8 +437,79 @@ func (p OptionalString) String() string {
}

var (
_ json.Unmarshaler = (*OptionalInteger)(nil)
_ json.Marshaler = (*OptionalInteger)(nil)
_ json.Unmarshaler = (*OptionalString)(nil)
_ json.Marshaler = (*OptionalString)(nil)
)

// OptionalBytes represents a byte size that has a default value
//
// When encoded in json, Default is encoded as "null".
// Stores the original string representation and parses on access.
// Embeds OptionalString to share common functionality.
type OptionalBytes struct {
OptionalString
}

// NewOptionalBytes returns an OptionalBytes from a string.
func NewOptionalBytes(s string) *OptionalBytes {
return &OptionalBytes{OptionalString{value: &s}}
}

// IsDefault returns if this is a default optional byte value.
func (p *OptionalBytes) IsDefault() bool {
if p == nil {
return true
}
return p.OptionalString.IsDefault()
}

// WithDefault resolves the byte size with the given default.
// Parses the stored string value using humanize.ParseBytes.
func (p *OptionalBytes) WithDefault(defaultValue uint64) (value uint64) {
if p.IsDefault() {
return defaultValue
}
strValue := p.OptionalString.WithDefault("")
bytes, err := humanize.ParseBytes(strValue)
if err != nil {
// This should never happen as values are validated during UnmarshalJSON.
// If it does, it indicates either config corruption or a programming error.
panic(fmt.Sprintf("invalid byte size in OptionalBytes: %q - %v", strValue, err))
}
return bytes
}

// UnmarshalJSON validates the input is a parseable byte size.
func (p *OptionalBytes) UnmarshalJSON(input []byte) error {
switch string(input) {
case "null", "undefined":
*p = OptionalBytes{}
default:
var value any
err := json.Unmarshal(input, &value)
if err != nil {
return err
}
switch v := value.(type) {
case float64:
str := fmt.Sprintf("%.0f", v)
p.value = &str
case string:
_, err := humanize.ParseBytes(v)
if err != nil {
return err
}
p.value = &v
default:
return fmt.Errorf("unable to parse byte size, expected a size string (e.g., \"5GiB\") or a number, but got %T", v)
}
}
return nil
}

var (
_ json.Unmarshaler = (*OptionalBytes)(nil)
_ json.Marshaler = (*OptionalBytes)(nil)
)

type swarmLimits doNotUse
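Because Swarm.ResourceMgr.MaxMemory switches from OptionalString to OptionalBytes above, a human-readable size in the config JSON now resolves to a byte count on read. A short sketch of the round trip (the struct here is a stand-in, not the full ResourceMgr type):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ipfs/kubo/config"
)

func main() {
	// Minimal stand-in for a config section holding a byte-size field.
	var section struct {
		MaxMemory *config.OptionalBytes `json:",omitempty"`
	}
	// UnmarshalJSON (shown above) validates the size string up front.
	if err := json.Unmarshal([]byte(`{"MaxMemory":"4GiB"}`), &section); err != nil {
		panic(err)
	}
	fmt.Println(section.MaxMemory.WithDefault(0)) // 4294967296 (4 GiB)
}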

@@ -5,6 +5,9 @@ import (
"encoding/json"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestOptionalDuration(t *testing.T) {

@@ -509,3 +512,125 @@ func TestOptionalString(t *testing.T) {
}
}
}

func TestOptionalBytes(t *testing.T) {
makeStringPointer := func(v string) *string { return &v }

t.Run("default value", func(t *testing.T) {
var b OptionalBytes
assert.True(t, b.IsDefault())
assert.Equal(t, uint64(0), b.WithDefault(0))
assert.Equal(t, uint64(1024), b.WithDefault(1024))
assert.Equal(t, "default", b.String())
})

t.Run("non-default value", func(t *testing.T) {
b := OptionalBytes{OptionalString{value: makeStringPointer("1MiB")}}
assert.False(t, b.IsDefault())
assert.Equal(t, uint64(1048576), b.WithDefault(512))
assert.Equal(t, "1MiB", b.String())
})

t.Run("JSON roundtrip", func(t *testing.T) {
testCases := []struct {
jsonInput string
jsonOutput string
expectedValue string
}{
{"null", "null", ""},
{"\"256KiB\"", "\"256KiB\"", "256KiB"},
{"\"1MiB\"", "\"1MiB\"", "1MiB"},
{"\"5GiB\"", "\"5GiB\"", "5GiB"},
{"\"256KB\"", "\"256KB\"", "256KB"},
{"1048576", "\"1048576\"", "1048576"},
}

for _, tc := range testCases {
t.Run(tc.jsonInput, func(t *testing.T) {
var b OptionalBytes
err := json.Unmarshal([]byte(tc.jsonInput), &b)
require.NoError(t, err)

if tc.expectedValue == "" {
assert.Nil(t, b.value)
} else {
require.NotNil(t, b.value)
assert.Equal(t, tc.expectedValue, *b.value)
}

out, err := json.Marshal(b)
require.NoError(t, err)
assert.Equal(t, tc.jsonOutput, string(out))
})
}
})

t.Run("parsing byte sizes", func(t *testing.T) {
testCases := []struct {
input string
expected uint64
}{
{"256KiB", 262144},
{"1MiB", 1048576},
{"5GiB", 5368709120},
{"256KB", 256000},
{"1048576", 1048576},
}

for _, tc := range testCases {
t.Run(tc.input, func(t *testing.T) {
var b OptionalBytes
err := json.Unmarshal([]byte("\""+tc.input+"\""), &b)
require.NoError(t, err)
assert.Equal(t, tc.expected, b.WithDefault(0))
})
}
})

t.Run("omitempty", func(t *testing.T) {
type Foo struct {
B *OptionalBytes `json:",omitempty"`
}

out, err := json.Marshal(new(Foo))
require.NoError(t, err)
assert.Equal(t, "{}", string(out))

var foo2 Foo
err = json.Unmarshal(out, &foo2)
require.NoError(t, err)

if foo2.B != nil {
assert.Equal(t, uint64(1024), foo2.B.WithDefault(1024))
assert.True(t, foo2.B.IsDefault())
} else {
// When field is omitted, pointer is nil which is also considered default
t.Log("B is nil, which is acceptable for omitempty")
}
})

t.Run("invalid values", func(t *testing.T) {
invalidInputs := []string{
"\"5XiB\"", "\"invalid\"", "\"\"", "[]", "{}",
}

for _, invalid := range invalidInputs {
t.Run(invalid, func(t *testing.T) {
var b OptionalBytes
err := json.Unmarshal([]byte(invalid), &b)
assert.Error(t, err)
})
}
})

t.Run("panic on invalid stored value", func(t *testing.T) {
// This tests that if somehow an invalid value gets stored
// (bypassing UnmarshalJSON validation), WithDefault will panic
invalidValue := "invalid-size"
b := OptionalBytes{OptionalString{value: &invalidValue}}

assert.Panics(t, func() {
b.WithDefault(1024)
}, "should panic on invalid stored value")
})
}
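One detail worth calling out from the table-driven cases: go-humanize treats SI and binary suffixes differently, which is why "256KB" and "256KiB" resolve to different numbers above. A quick standalone check:

package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	si, _ := humanize.ParseBytes("256KB")   // SI unit: 256 * 1000
	bin, _ := humanize.ParseBytes("256KiB") // binary unit: 256 * 1024
	fmt.Println(si, bin)                    // 256000 262144
}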

@@ -15,6 +15,7 @@ import (

"github.com/cheggaaa/pb"
"github.com/ipfs/boxo/files"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
mfs "github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
"github.com/ipfs/boxo/verifcid"

@@ -61,20 +62,46 @@ const (
inlineLimitOptionName = "inline-limit"
toFilesOptionName = "to-files"

preserveModeOptionName = "preserve-mode"
preserveMtimeOptionName = "preserve-mtime"
modeOptionName = "mode"
mtimeOptionName = "mtime"
mtimeNsecsOptionName = "mtime-nsecs"
preserveModeOptionName = "preserve-mode"
preserveMtimeOptionName = "preserve-mtime"
modeOptionName = "mode"
mtimeOptionName = "mtime"
mtimeNsecsOptionName = "mtime-nsecs"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
emptyDirsOptionName = "empty-dirs"
)

const adderOutChanSize = 8
const (
adderOutChanSize = 8
)

var AddCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Add a file or directory to IPFS.",
ShortDescription: `
Adds the content of <path> to IPFS. Use -r to add directories (recursively).

FAST PROVIDE OPTIMIZATION:

When you add content to IPFS, the sweep provider queues it for efficient
DHT provides over time. While this is resource-efficient, other peers won't
find your content immediately after 'ipfs add' completes.

To make sharing faster, 'ipfs add' does an immediate provide of the root CID
to the DHT in addition to the regular queue. This complements the sweep provider:
fast-provide handles the urgent case (root CIDs that users share and reference),
while the sweep provider efficiently provides all blocks according to
Provide.Strategy over time.

By default, this immediate provide runs in the background without blocking
the command. If you need certainty that the root CID is discoverable before
the command returns (e.g., sharing a link immediately), use --fast-provide-wait
to wait for the provide to complete. Use --fast-provide-root=false to skip
this optimization.

This works best with the sweep provider and accelerated DHT client.
Automatically skipped when DHT is not available.
`,
LongDescription: `
Adds the content of <path> to IPFS. Use -r to add directories.

@@ -122,6 +149,18 @@ to find it in the future:
See 'ipfs files --help' to learn more about using MFS
for keeping track of added files and directories.

SYMLINK HANDLING:

By default, symbolic links are preserved as UnixFS symlink nodes that store
the target path. Use --dereference-symlinks to resolve symlinks to their
target content instead:

> ipfs add -r --dereference-symlinks ./mydir

This resolves all symlinks, including CLI arguments and those found inside
directories. Symlinks to files become regular file content, symlinks to
directories are traversed and their contents are added.

CHUNKING EXAMPLES:

The chunker option, '-s', specifies the chunking strategy that dictates

@@ -133,6 +172,16 @@ Buzhash or Rabin fingerprint chunker for content defined chunking by
specifying buzhash or rabin-[min]-[avg]-[max] (where min/avg/max refer
to the desired chunk sizes in bytes), e.g. 'rabin-262144-524288-1048576'.

The maximum accepted value for 'size-N' and rabin 'max' parameter is
2MiB minus 256 bytes (2096896 bytes). The 256-byte overhead budget is
reserved for protobuf/UnixFS framing so that serialized blocks stay
within the 2MiB block size limit from the bitswap spec. The buzhash
chunker uses a fixed internal maximum of 512KiB and is not affected.

Only the fixed-size chunker ('size-N') guarantees that the same data
will always produce the same CID. The rabin and buzhash chunkers may
change their internal parameters in a future release.

The following examples use very small byte sizes to demonstrate the
properties of the different chunkers on a small file. You'll likely
want to use a 1024 times larger chunk sizes for most files.
@@ -175,11 +224,13 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
Options: []cmds.Option{
// Input Processing
cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
cmds.OptionDerefArgs, // a builtin option that resolves passed in filesystem links (--dereference-args)
cmds.OptionDerefArgs, // DEPRECATED: use --dereference-symlinks instead
cmds.OptionStdinName, // a builtin option that optionally allows wrapping stdin into a named file
cmds.OptionHidden,
cmds.OptionIgnore,
cmds.OptionIgnoreRules,
cmds.BoolOption(emptyDirsOptionName, "E", "Include empty directories in the import.").WithDefault(config.DefaultUnixFSIncludeEmptyDirs),
cmds.OptionDerefSymlinks, // resolve symlinks to their target content
// Output Control
cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
cmds.BoolOption(quieterOptionName, "Q", "Write only final hash."),

@@ -213,6 +264,8 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). WARNING: experimental, forces dag-pb for root block, disables raw-leaves"),
cmds.UintOption(mtimeNsecsOptionName, "Custom POSIX modification time (optional time fraction in nanoseconds)"),
cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CID to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"),
cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"),
},
PreRun: func(req *cmds.Request, env cmds.Environment) error {
quiet, _ := req.Options[quietOptionName].(bool)

@@ -247,7 +300,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
}

progress, _ := req.Options[progressOptionName].(bool)
trickle, _ := req.Options[trickleOptionName].(bool)
trickle, trickleSet := req.Options[trickleOptionName].(bool)
wrap, _ := req.Options[wrapOptionName].(bool)
onlyHash, _ := req.Options[onlyHashOptionName].(bool)
silent, _ := req.Options[silentOptionName].(bool)

@@ -258,6 +311,7 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int)
var sizeEstimationMode uio.SizeEstimationMode
nocopy, _ := req.Options[noCopyOptionName].(bool)
fscache, _ := req.Options[fstoreCacheOptionName].(bool)
cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)

@@ -283,6 +337,19 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
mode, _ := req.Options[modeOptionName].(uint)
mtime, _ := req.Options[mtimeOptionName].(int64)
mtimeNsecs, _ := req.Options[mtimeNsecsOptionName].(uint)
fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)
emptyDirs, _ := req.Options[emptyDirsOptionName].(bool)

// Note: --dereference-args is deprecated but still works for backwards compatibility.
// The help text marks it as DEPRECATED. Users should use --dereference-symlinks instead,
// which is a superset (resolves both CLI arg symlinks AND nested symlinks in directories).

// Wire --trickle from config
if !trickleSet && !cfg.Import.UnixFSDAGLayout.IsDefault() {
layout := cfg.Import.UnixFSDAGLayout.WithDefault(config.DefaultUnixFSDAGLayout)
trickle = layout == config.DAGLayoutTrickle
}

if chunker == "" {
chunker = cfg.Import.UnixFSChunker.WithDefault(config.DefaultUnixFSChunker)

@@ -319,6 +386,12 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
}

// SizeEstimationMode is always set from config (no CLI flag)
sizeEstimationMode = cfg.Import.HAMTSizeEstimationMode()

fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)

// Storing optional mode or mtime (UnixFS 1.5) requires root block
// to always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible.
if preserveMode || preserveMtime || mode != 0 || mtime != 0 {

@@ -377,6 +450,8 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import

options.Unixfs.PreserveMode(preserveMode),
options.Unixfs.PreserveMtime(preserveMtime),

options.Unixfs.IncludeEmptyDirs(emptyDirs),
}

if mode != 0 {

@@ -409,6 +484,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout))
}

// SizeEstimationMode is always set from config
opts = append(opts, options.Unixfs.SizeEstimationMode(sizeEstimationMode))

if trickle {
opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
}

@@ -421,11 +499,12 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
}
var added int
var fileAddedToMFS bool
var lastRootCid path.ImmutablePath // Track the root CID for fast-provide
addit := toadd.Entries()
for addit.Next() {
_, dir := addit.Node().(files.Directory)
errCh := make(chan error, 1)
events := make(chan interface{}, adderOutChanSize)
events := make(chan any, adderOutChanSize)
opts[len(opts)-1] = options.Unixfs.Events(events)

go func() {

@@ -437,6 +516,9 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
return
}

// Store the root CID for potential fast-provide operation
lastRootCid = pathAdded

// creating MFS pointers when optional --to-files is set
if toFilesSet {
if addit.Name() == "" {

@@ -560,12 +642,29 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#import
return fmt.Errorf("expected a file argument")
}

// Apply fast-provide-root if the flag is enabled
if fastProvideRoot && (lastRootCid != path.ImmutablePath{}) {
cfg, err := ipfsNode.Repo.Config()
if err != nil {
return err
}
if err := cmdenv.ExecuteFastProvide(req.Context, ipfsNode, cfg, lastRootCid.RootCid(), fastProvideWait, dopin, dopin, toFilesSet); err != nil {
return err
}
} else if !fastProvideRoot {
if fastProvideWait {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true)
} else {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config")
}
}

return nil
},
PostRun: cmds.PostRunMap{
cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
sizeChan := make(chan int64, 1)
outChan := make(chan interface{})
outChan := make(chan any)
req := res.Request()

// Could be slow.

@@ -98,6 +98,9 @@ var blockGetCmd = &cmds.Command{
'ipfs block get' is a plumbing command for retrieving raw IPFS blocks.
It takes a <cid>, and outputs the block to stdout.
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipld.raw",
},
},

Arguments: []cmds.Argument{

@@ -119,6 +122,8 @@ It takes a <cid>, and outputs the block to stdout.
return err
}

res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipld.raw")
return res.Emit(r)
},
}

@@ -112,7 +112,7 @@ The optional format string is a printf style format string:
return emitCids(req, resp, opts)
},
PostRun: cmds.PostRunMap{
cmds.CLI: streamResult(func(v interface{}, out io.Writer) nonFatalError {
cmds.CLI: streamResult(func(v any, out io.Writer) nonFatalError {
r := v.(*CidFormatRes)
if r.ErrorMsg != "" {
return nonFatalError(fmt.Sprintf("%s: %s", r.CidStr, r.ErrorMsg))

@@ -39,7 +39,7 @@ func TestCidFmtCmd(t *testing.T) {

// Mock request
req := &cmds.Request{
Options: map[string]interface{}{
Options: map[string]any{
cidToVersionOptionName: "0",
cidMultibaseOptionName: e.MultibaseName,
cidFormatOptionName: "%s",

@@ -90,7 +90,7 @@ func TestCidFmtCmd(t *testing.T) {
for _, e := range testCases {
// Mock request
req := &cmds.Request{
Options: map[string]interface{}{
Options: map[string]any{
cidToVersionOptionName: e.Ver,
cidMultibaseOptionName: e.MultibaseName,
cidFormatOptionName: "%s",
@@ -1,15 +1,19 @@
package cmdenv

import (
"context"
"fmt"
"strconv"
"strings"

"github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/core"

"github.com/ipfs/go-cid"
cmds "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log/v2"
routing "github.com/libp2p/go-libp2p/core/routing"

"github.com/ipfs/kubo/commands"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
coreiface "github.com/ipfs/kubo/core/coreiface"
options "github.com/ipfs/kubo/core/coreiface/options"
)

@@ -17,7 +21,7 @@ import (
var log = logging.Logger("core/commands/cmdenv")

// GetNode extracts the node from the environment.
func GetNode(env interface{}) (*core.IpfsNode, error) {
func GetNode(env any) (*core.IpfsNode, error) {
ctx, ok := env.(*commands.Context)
if !ok {
return nil, fmt.Errorf("expected env to be of type %T, got %T", ctx, env)

@@ -86,3 +90,103 @@ func needEscape(s string) bool {
}
return false
}

// provideCIDSync performs a synchronous/blocking provide operation to announce
// the given CID to the DHT.
//
// - If the accelerated DHT client is used, a DHT lookup isn't needed, we
// directly allocate provider records to closest peers.
// - If Provide.DHT.SweepEnabled=true or OptimisticProvide=true, we make an
// optimistic provide call.
// - Else we make a standard provide call (much slower).
//
// IMPORTANT: The caller MUST verify DHT availability using HasActiveDHTClient()
// before calling this function. Calling with a nil or invalid router will cause
// a panic - this is the caller's responsibility to prevent.
func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) error {
return router.Provide(ctx, c, true)
}

// ExecuteFastProvide immediately provides a root CID to the DHT, bypassing the regular
// provide queue for faster content discovery. This function is reusable across commands
// that add or import content, such as ipfs add and ipfs dag import.
//
// Parameters:
// - ctx: context for synchronous provides
// - ipfsNode: the IPFS node instance
// - cfg: node configuration
// - rootCid: the CID to provide
// - wait: whether to block until provide completes (sync mode)
// - isPinned: whether content is pinned
// - isPinnedRoot: whether this is a pinned root CID
// - isMFS: whether content is in MFS
//
// Return value:
// - Returns nil if operation succeeded or was skipped (preconditions not met)
// - Returns error only in sync mode (wait=true) when provide operation fails
// - In async mode (wait=false), always returns nil (errors logged in goroutine)
//
// The function handles all precondition checks (Provide.Enabled, DHT availability,
// strategy matching) and logs appropriately. In async mode, it launches a goroutine
// with a detached context and timeout.
func ExecuteFastProvide(
ctx context.Context,
ipfsNode *core.IpfsNode,
cfg *config.Config,
rootCid cid.Cid,
wait bool,
isPinned bool,
isPinnedRoot bool,
isMFS bool,
) error {
log.Debugw("fast-provide-root: enabled", "wait", wait)

// Check preconditions for providing
switch {
case !cfg.Provide.Enabled.WithDefault(config.DefaultProvideEnabled):
log.Debugw("fast-provide-root: skipped", "reason", "Provide.Enabled is false")
return nil
case cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0:
log.Debugw("fast-provide-root: skipped", "reason", "Provide.DHT.Interval is 0")
return nil
case !ipfsNode.HasActiveDHTClient():
log.Debugw("fast-provide-root: skipped", "reason", "DHT not available")
return nil
}

// Check if strategy allows providing this content
strategyStr := cfg.Provide.Strategy.WithDefault(config.DefaultProvideStrategy)
strategy := config.ParseProvideStrategy(strategyStr)
shouldProvide := config.ShouldProvideForStrategy(strategy, isPinned, isPinnedRoot, isMFS)

if !shouldProvide {
log.Debugw("fast-provide-root: skipped", "reason", "strategy does not match content", "strategy", strategyStr, "pinned", isPinned, "pinnedRoot", isPinnedRoot, "mfs", isMFS)
return nil
}

// Execute provide operation
if wait {
// Synchronous mode: block until provide completes, return error on failure
log.Debugw("fast-provide-root: providing synchronously", "cid", rootCid)
if err := provideCIDSync(ctx, ipfsNode.DHTClient, rootCid); err != nil {
log.Warnw("fast-provide-root: sync provide failed", "cid", rootCid, "error", err)
return fmt.Errorf("fast-provide: %w", err)
}
log.Debugw("fast-provide-root: sync provide completed", "cid", rootCid)
return nil
}

// Asynchronous mode (default): fire-and-forget, don't block, always return nil
log.Debugw("fast-provide-root: providing asynchronously", "cid", rootCid)
go func() {
// Use detached context with timeout to prevent hanging on network issues
ctx, cancel := context.WithTimeout(context.Background(), config.DefaultFastProvideTimeout)
defer cancel()
if err := provideCIDSync(ctx, ipfsNode.DHTClient, rootCid); err != nil {
log.Warnw("fast-provide-root: async provide failed", "cid", rootCid, "error", err)
} else {
log.Debugw("fast-provide-root: async provide completed", "cid", rootCid)
}
}()
return nil
}
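For a command that has just finished importing content, the call pattern is the one used by 'ipfs add' and 'ipfs dag import' in this changeset. A hedged sketch of a call site (fastProvideAfterImport is an illustrative wrapper, and the pinned/MFS booleans are assumptions about how the content was stored):

import (
	"github.com/ipfs/go-cid"
	cmds "github.com/ipfs/go-ipfs-cmds"
	"github.com/ipfs/kubo/config"
	"github.com/ipfs/kubo/core"
	"github.com/ipfs/kubo/core/commands/cmdenv"
)

// Illustrative wrapper: announce a freshly imported root right away without
// blocking the caller. ExecuteFastProvide itself re-checks Provide.Enabled,
// DHT availability and Provide.Strategy before doing any work.
func fastProvideAfterImport(req *cmds.Request, node *core.IpfsNode, cfg *config.Config, root cid.Cid) error {
	return cmdenv.ExecuteFastProvide(req.Context, node, cfg, root,
		false, // wait: fire-and-forget (async mode always returns nil)
		true,  // isPinned: assumed pinned in this sketch
		true,  // isPinnedRoot: assumed to be a pin root
		false, // isMFS
	)
}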

@@ -2,24 +2,28 @@ package cmdutils

import (
"fmt"
"slices"

cmds "github.com/ipfs/go-ipfs-cmds"

"github.com/ipfs/boxo/path"
"github.com/ipfs/go-cid"
coreiface "github.com/ipfs/kubo/core/coreiface"
"github.com/libp2p/go-libp2p/core/peer"
)

const (
AllowBigBlockOptionName = "allow-big-block"
SoftBlockLimit = 1024 * 1024 // https://github.com/ipfs/kubo/issues/7421#issuecomment-910833499
MaxPinNameBytes = 255 // Maximum number of bytes allowed for a pin name
// SoftBlockLimit is the maximum block size for bitswap transfer.
// If this value changes, update the "2MiB" strings in error messages below.
SoftBlockLimit = 2 * 1024 * 1024 // https://specs.ipfs.tech/bitswap-protocol/#block-sizes
MaxPinNameBytes = 255 // Maximum number of bytes allowed for a pin name
)

var AllowBigBlockOption cmds.Option

func init() {
AllowBigBlockOption = cmds.BoolOption(AllowBigBlockOptionName, "Disable block size check and allow creation of blocks bigger than 1MiB. WARNING: such blocks won't be transferable over the standard bitswap.").WithDefault(false)
AllowBigBlockOption = cmds.BoolOption(AllowBigBlockOptionName, "Disable block size check and allow creation of blocks bigger than 2MiB. WARNING: such blocks won't be transferable over the standard bitswap.").WithDefault(false)
}

func CheckCIDSize(req *cmds.Request, c cid.Cid, dagAPI coreiface.APIDagService) error {

@@ -42,11 +46,10 @@ func CheckBlockSize(req *cmds.Request, size uint64) error {
return nil
}

// We do not allow producing blocks bigger than 1 MiB to avoid errors
// when transmitting them over BitSwap. The 1 MiB constant is an
// unenforced and undeclared rule of thumb hard-coded here.
// Block size is limited to SoftBlockLimit (2MiB) as defined in the bitswap spec.
// https://specs.ipfs.tech/bitswap-protocol/#block-sizes
if size > SoftBlockLimit {
return fmt.Errorf("produced block is over 1MiB: big blocks can't be exchanged with other peers. consider using UnixFS for automatic chunking of bigger files, or pass --allow-big-block to override")
return fmt.Errorf("produced block is over 2MiB: big blocks can't be exchanged with other peers. consider using UnixFS for automatic chunking of bigger files, or pass --allow-big-block to override")
}
return nil
}

@@ -74,10 +77,23 @@ func PathOrCidPath(str string) (path.Path, error) {
return p, nil
}

// Save the original error before attempting fallback
originalErr := err

if p, err := path.NewPath("/ipfs/" + str); err == nil {
return p, nil
}

// Send back original err.
return nil, err
return nil, originalErr
}

// CloneAddrInfo returns a copy of the AddrInfo with a cloned Addrs slice.
// This prevents data races if the sender reuses the backing array.
// See: https://github.com/ipfs/kubo/issues/11116
func CloneAddrInfo(ai peer.AddrInfo) peer.AddrInfo {
return peer.AddrInfo{
ID: ai.ID,
Addrs: slices.Clone(ai.Addrs),
}
}
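The clone matters because peer.AddrInfo carries a slice header: a plain struct copy still shares the Addrs backing array with the sender, so concurrent reuse of that array can race. A tiny hedged sketch of the safe hand-off (handOff is illustrative only):

import (
	"github.com/ipfs/kubo/core/commands/cmdutils"
	"github.com/libp2p/go-libp2p/core/peer"
)

// Illustrative only: give another goroutine its own copy of the Addrs slice
// so later mutations by the sender cannot race with the receiver.
func handOff(ai peer.AddrInfo, out chan<- peer.AddrInfo) {
	out <- cmdutils.CloneAddrInfo(ai)
}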
|
||||
|
||||
106
core/commands/cmdutils/utils_test.go
Normal file
106
core/commands/cmdutils/utils_test.go
Normal file
@ -0,0 +1,106 @@
|
||||
package cmdutils
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPathOrCidPath(t *testing.T) {
|
||||
t.Run("valid path is returned as-is", func(t *testing.T) {
|
||||
validPath := "/ipfs/QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"
|
||||
p, err := PathOrCidPath(validPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, validPath, p.String())
|
||||
})
|
||||
|
||||
t.Run("valid CID is converted to /ipfs/ path", func(t *testing.T) {
|
||||
cid := "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"
|
||||
p, err := PathOrCidPath(cid)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "/ipfs/"+cid, p.String())
|
||||
})
|
||||
|
||||
t.Run("valid ipns path is returned as-is", func(t *testing.T) {
|
||||
validPath := "/ipns/example.com"
|
||||
p, err := PathOrCidPath(validPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, validPath, p.String())
|
||||
})
|
||||
|
||||
t.Run("returns original error when both attempts fail", func(t *testing.T) {
|
||||
invalidInput := "invalid!@#path"
|
||||
_, err := PathOrCidPath(invalidInput)
|
||||
require.Error(t, err)
|
||||
|
||||
// The error should reference the original input attempt.
|
||||
// This ensures users get meaningful error messages about their actual input.
|
||||
assert.Contains(t, err.Error(), invalidInput,
|
||||
"error should mention the original input")
|
||||
assert.Contains(t, err.Error(), "path does not have enough components",
|
||||
"error should describe the problem with the original input")
|
||||
})
|
||||
|
||||
t.Run("empty string returns error about original input", func(t *testing.T) {
|
||||
_, err := PathOrCidPath("")
|
||||
require.Error(t, err)
|
||||
|
||||
// Verify we're not getting an error about "/ipfs/" (the fallback)
|
||||
errMsg := err.Error()
|
||||
assert.NotContains(t, errMsg, "/ipfs/",
|
||||
"error should be about empty input, not the fallback path")
|
||||
})
|
||||
|
||||
t.Run("invalid characters return error about original input", func(t *testing.T) {
|
||||
invalidInput := "not a valid path or CID with spaces and /@#$%"
|
||||
_, err := PathOrCidPath(invalidInput)
|
||||
require.Error(t, err)
|
||||
|
||||
// The error message should help debug the original input
|
||||
assert.True(t, strings.Contains(err.Error(), invalidInput) ||
|
||||
strings.Contains(err.Error(), "invalid"),
|
||||
"error should reference original problematic input")
|
||||
})
|
||||
|
||||
t.Run("CID with path is converted correctly", func(t *testing.T) {
|
||||
cidWithPath := "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG/file.txt"
|
||||
p, err := PathOrCidPath(cidWithPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "/ipfs/"+cidWithPath, p.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidatePinName(t *testing.T) {
|
||||
t.Run("valid pin name is accepted", func(t *testing.T) {
|
||||
err := ValidatePinName("my-pin-name")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("empty pin name is accepted", func(t *testing.T) {
|
||||
err := ValidatePinName("")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("pin name at max length is accepted", func(t *testing.T) {
|
||||
maxName := strings.Repeat("a", MaxPinNameBytes)
|
||||
err := ValidatePinName(maxName)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("pin name exceeding max length is rejected", func(t *testing.T) {
|
||||
tooLong := strings.Repeat("a", MaxPinNameBytes+1)
|
||||
err := ValidatePinName(tooLong)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "max")
|
||||
})
|
||||
|
||||
t.Run("pin name with unicode is counted by bytes", func(t *testing.T) {
|
||||
// Unicode character can be multiple bytes
|
||||
unicodeName := strings.Repeat("🔒", MaxPinNameBytes/4+1) // emoji is 4 bytes
|
||||
err := ValidatePinName(unicodeName)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "bytes")
|
||||
})
|
||||
}

@@ -20,7 +20,7 @@ type commandEncoder struct {
w io.Writer
}

func (e *commandEncoder) Encode(v interface{}) error {
func (e *commandEncoder) Encode(v any) error {
var (
cmd *Command
ok bool

@@ -232,7 +232,7 @@ type nonFatalError string

// streamResult is a helper function to stream results that possibly
// contain non-fatal errors. The helper function is allowed to panic
// on internal errors.
func streamResult(procVal func(interface{}, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
func streamResult(procVal func(any, io.Writer) nonFatalError) func(cmds.Response, cmds.ResponseEmitter) error {
return func(res cmds.Response, re cmds.ResponseEmitter) (rerr error) {
defer func() {
if r := recover(); r != nil {

@@ -76,6 +76,9 @@ func TestCommands(t *testing.T) {
"/diag/cmds",
"/diag/cmds/clear",
"/diag/cmds/set-time",
"/diag/datastore",
"/diag/datastore/count",
"/diag/datastore/get",
"/diag/profile",
"/diag/sys",
"/files",

@@ -90,6 +93,7 @@ func TestCommands(t *testing.T) {
"/files/stat",
"/files/write",
"/files/chmod",
"/files/chroot",
"/files/touch",
"/filestore",
"/filestore/dups",

@@ -102,6 +106,7 @@ func TestCommands(t *testing.T) {
"/key/gen",
"/key/import",
"/key/list",
"/key/ls",
"/key/rename",
"/key/rm",
"/key/rotate",

@@ -119,12 +124,14 @@ func TestCommands(t *testing.T) {
"/multibase/transcode",
"/multibase/list",
"/name",
"/name/get",
"/name/inspect",
"/name/publish",
"/name/pubsub",
"/name/pubsub/cancel",
"/name/pubsub/state",
"/name/pubsub/subs",
"/name/put",
"/name/resolve",
"/object",
"/object/data",

@@ -169,6 +176,7 @@ func TestCommands(t *testing.T) {
"/pubsub/ls",
"/pubsub/peers",
"/pubsub/pub",
"/pubsub/reset",
"/pubsub/sub",
"/refs",
"/refs/local",

@@ -190,6 +198,7 @@ func TestCommands(t *testing.T) {
"/stats/repo",
"/swarm",
"/swarm/addrs",
"/swarm/addrs/autonat",
"/swarm/addrs/listen",
"/swarm/addrs/local",
"/swarm/connect",

@@ -22,13 +22,13 @@ import (

// ConfigUpdateOutput is config profile apply command's output
type ConfigUpdateOutput struct {
OldCfg map[string]interface{}
NewCfg map[string]interface{}
OldCfg map[string]any
NewCfg map[string]any
}

type ConfigField struct {
Key string
Value interface{}
Value any
}

const (

@@ -117,7 +117,7 @@ Set multiple values in the 'Addresses.AppendAnnounce' array:
value := args[1]

if parseJSON, _ := req.Options[configJSONOptionName].(bool); parseJSON {
var jsonVal interface{}
var jsonVal any
if err := json.Unmarshal([]byte(value), &jsonVal); err != nil {
err = fmt.Errorf("failed to unmarshal json. %s", err)
return err

@@ -199,7 +199,7 @@ var configShowCmd = &cmds.Command{
NOTE: For security reasons, this command will omit your private key and remote services. If you would like to make a full backup of your config (private key included), you must copy the config file from your repo.
`,
},
Type: make(map[string]interface{}),
Type: make(map[string]any),
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cfgRoot, err := cmdenv.GetConfigRoot(env)
if err != nil {

@@ -217,7 +217,7 @@ NOTE: For security reasons, this command will omit your private key and remote s
return err
}

var cfg map[string]interface{}
var cfg map[string]any
err = json.Unmarshal(data, &cfg)
if err != nil {
return err

@@ -262,7 +262,7 @@ NOTE: For security reasons, this command will omit your private key and remote s
},
}

var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]interface{}) error {
var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *map[string]any) error {
buf, err := config.HumanOutput(out)
if err != nil {
return err

@@ -273,35 +273,35 @@ var HumanJSONEncoder = cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer
})

// Scrubs value and returns error if missing
func scrubValue(m map[string]interface{}, key []string) (map[string]interface{}, error) {
func scrubValue(m map[string]any, key []string) (map[string]any, error) {
return scrubMapInternal(m, key, false)
}

// Scrubs value and returns no error if missing
func scrubOptionalValue(m map[string]interface{}, key []string) (map[string]interface{}, error) {
func scrubOptionalValue(m map[string]any, key []string) (map[string]any, error) {
return scrubMapInternal(m, key, true)
}

func scrubEither(u interface{}, key []string, okIfMissing bool) (interface{}, error) {
m, ok := u.(map[string]interface{})
func scrubEither(u any, key []string, okIfMissing bool) (any, error) {
m, ok := u.(map[string]any)
if ok {
return scrubMapInternal(m, key, okIfMissing)
}
return scrubValueInternal(m, key, okIfMissing)
}

func scrubValueInternal(v interface{}, key []string, okIfMissing bool) (interface{}, error) {
func scrubValueInternal(v any, key []string, okIfMissing bool) (any, error) {
if v == nil && !okIfMissing {
return nil, errors.New("failed to find specified key")
}
return nil, nil
}

func scrubMapInternal(m map[string]interface{}, key []string, okIfMissing bool) (map[string]interface{}, error) {
func scrubMapInternal(m map[string]any, key []string, okIfMissing bool) (map[string]any, error) {
if len(key) == 0 {
return make(map[string]interface{}), nil // delete value
return make(map[string]any), nil // delete value
}
n := map[string]interface{}{}
n := map[string]any{}
for k, v := range m {
if key[0] == "*" || strings.EqualFold(key[0], k) {
u, err := scrubEither(v, key[1:], okIfMissing)

@@ -463,7 +463,7 @@ func buildProfileHelp() string {
}

// scrubPrivKey scrubs private key for security reasons.
func scrubPrivKey(cfg *config.Config) (map[string]interface{}, error) {
func scrubPrivKey(cfg *config.Config) (map[string]any, error) {
cfgMap, err := config.ToMap(cfg)
if err != nil {
return nil, err

@@ -553,7 +553,7 @@ func getConfigWithAutoExpand(r repo.Repo, key string) (*ConfigField, error) {
}, nil
}

func setConfig(r repo.Repo, key string, value interface{}) (*ConfigField, error) {
func setConfig(r repo.Repo, key string, value any) (*ConfigField, error) {
err := r.SetConfigKey(key, value)
if err != nil {
return nil, fmt.Errorf("failed to set config value: %s (maybe use --json?)", err)

@@ -646,7 +646,7 @@ func getRemotePinningServices(r repo.Repo) (map[string]config.RemotePinningServi
if remoteServicesTag, err := getConfig(r, config.RemoteServicesPath); err == nil {
// seems that golang cannot type assert map[string]interface{} to map[string]config.RemotePinningService
// so we have to manually copy the data :-|
if val, ok := remoteServicesTag.Value.(map[string]interface{}); ok {
if val, ok := remoteServicesTag.Value.(map[string]any); ok {
jsonString, err := json.Marshal(val)
if err != nil {
return nil, err
@@ -7,6 +7,7 @@ import (
"io"
"path"

"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/core/commands/cmdenv"
"github.com/ipfs/kubo/core/commands/cmdutils"

@@ -16,10 +17,12 @@ import (
)

const (
pinRootsOptionName = "pin-roots"
progressOptionName = "progress"
silentOptionName = "silent"
statsOptionName = "stats"
pinRootsOptionName = "pin-roots"
progressOptionName = "progress"
silentOptionName = "silent"
statsOptionName = "stats"
fastProvideRootOptionName = "fast-provide-root"
fastProvideWaitOptionName = "fast-provide-wait"
)

// DagCmd provides a subset of commands for interacting with ipld dag objects

@@ -189,6 +192,18 @@ Note:
currently present in the blockstore does not represent a complete DAG,
pinning of that individual root will fail.

FAST PROVIDE OPTIMIZATION:

Root CIDs from CAR headers are immediately provided to the DHT in addition
to the regular provide queue, allowing other peers to discover your content
right away. This complements the sweep provider, which efficiently provides
all blocks according to Provide.Strategy over time.

By default, the provide happens in the background without blocking the
command. Use --fast-provide-wait to wait for the provide to complete, or
--fast-provide-root=false to skip it. Works even with --pin-roots=false.
Automatically skipped when DHT is not available.

Maximum supported CAR version: 2
Specification of CAR formats: https://ipld.io/specs/transport/car/
`,

@@ -200,6 +215,8 @@ Specification of CAR formats: https://ipld.io/specs/transport/car/
cmds.BoolOption(pinRootsOptionName, "Pin optional roots listed in the .car headers after importing.").WithDefault(true),
cmds.BoolOption(silentOptionName, "No output."),
cmds.BoolOption(statsOptionName, "Output stats."),
cmds.BoolOption(fastProvideRootOptionName, "Immediately provide root CIDs to DHT in addition to regular queue, for faster discovery. Default: Import.FastProvideRoot"),
cmds.BoolOption(fastProvideWaitOptionName, "Block until the immediate provide completes before returning. Default: Import.FastProvideWait"),
cmdutils.AllowBigBlockOption,
},
Type: CarImportOutput{},

@@ -259,6 +276,9 @@ Note that at present only single root selections / .car files are supported.
The output of blocks happens in strict DAG-traversal, first-seen, order.
CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/
`,
HTTP: &cmds.HTTPHelpText{
ResponseContentType: "application/vnd.ipld.car",
},
},
Arguments: []cmds.Argument{
cmds.StringArg("root", true, false, "CID of a root to recursively export").EnableStdin(),

@@ -274,9 +294,9 @@ CAR file follows the CARv1 format: https://ipld.io/specs/transport/car/carv1/

// DagStat is a dag stat command response
type DagStat struct {
Cid cid.Cid `json:",omitempty"`
Size uint64 `json:",omitempty"`
NumBlocks int64 `json:",omitempty"`
Cid cid.Cid
Size uint64 `json:",omitempty"`
NumBlocks int64 `json:",omitempty"`
}

func (s *DagStat) String() string {

@@ -333,7 +353,11 @@ type DagStatSummary struct {
}

func (s *DagStatSummary) String() string {
return fmt.Sprintf("Total Size: %d\nUnique Blocks: %d\nShared Size: %d\nRatio: %f", s.TotalSize, s.UniqueBlocks, s.SharedSize, s.Ratio)
return fmt.Sprintf("Total Size: %d (%s)\nUnique Blocks: %d\nShared Size: %d (%s)\nRatio: %f",
s.TotalSize, humanize.Bytes(s.TotalSize),
s.UniqueBlocks,
s.SharedSize, humanize.Bytes(s.SharedSize),
s.Ratio)
}

func (s *DagStatSummary) incrementTotalSize(size uint64) {

@@ -368,7 +392,7 @@ Note: This command skips duplicate blocks in reporting both size and the number
cmds.StringArg("root", true, true, "CID of a DAG root to get statistics for").EnableStdin(),
},
Options: []cmds.Option{
cmds.BoolOption(progressOptionName, "p", "Return progressive data while reading through the DAG").WithDefault(true),
cmds.BoolOption(progressOptionName, "p", "Show progress on stderr. Auto-detected if stderr is a terminal."),
},
Run: dagStat,
Type: DagStatSummary{},

@@ -73,6 +73,8 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment

}()

res.SetEncodingType(cmds.OctetStream)
res.SetContentType("application/vnd.ipld.car")
if err := res.Emit(pipeR); err != nil {
pipeR.Close() // ignore the error if any
return err

@@ -11,6 +11,7 @@ import (
cmds "github.com/ipfs/go-ipfs-cmds"
ipld "github.com/ipfs/go-ipld-format"
ipldlegacy "github.com/ipfs/go-ipld-legacy"
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core/coreiface/options"
gocarv2 "github.com/ipld/go-car/v2"

@@ -19,6 +20,8 @@ import (
"github.com/ipfs/kubo/core/commands/cmdutils"
)

var log = logging.Logger("core/commands")

func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
node, err := cmdenv.GetNode(env)
if err != nil {

@@ -47,6 +50,12 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment

doPinRoots, _ := req.Options[pinRootsOptionName].(bool)

fastProvideRoot, fastProvideRootSet := req.Options[fastProvideRootOptionName].(bool)
fastProvideWait, fastProvideWaitSet := req.Options[fastProvideWaitOptionName].(bool)

fastProvideRoot = config.ResolveBoolFromConfig(fastProvideRoot, fastProvideRootSet, cfg.Import.FastProvideRoot, config.DefaultFastProvideRoot)
fastProvideWait = config.ResolveBoolFromConfig(fastProvideWait, fastProvideWaitSet, cfg.Import.FastProvideWait, config.DefaultFastProvideWait)

// grab a pinlock ( which doubles as a GC lock ) so that regardless of the
// size of the streamed-in cars nothing will disappear on us before we had
// a chance to roots that may show up at the very end

@@ -191,5 +200,21 @@ func dagImport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
}
}

// Fast-provide roots for faster discovery
if fastProvideRoot {
err = roots.ForEach(func(c cid.Cid) error {
return cmdenv.ExecuteFastProvide(req.Context, node, cfg, c, fastProvideWait, doPinRoots, doPinRoots, false)
})
if err != nil {
return err
}
} else {
if fastProvideWait {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config", "wait-flag-ignored", true)
} else {
log.Debugw("fast-provide-root: skipped", "reason", "disabled by flag or config")
}
}

return nil
}

@@ -5,6 +5,7 @@ import (
"io"
"os"

"github.com/dustin/go-humanize"
mdag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/merkledag/traverse"
cid "github.com/ipfs/go-cid"

@@ -19,7 +20,11 @@ import (
// to compute the new state

func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
progressive := req.Options[progressOptionName].(bool)
// Default to true (emit intermediate states) for HTTP/RPC clients that want progress
progressive := true
if val, specified := req.Options[progressOptionName].(bool); specified {
progressive = val
}
api, err := cmdenv.GetApi(env, req)
if err != nil {
return err

@@ -84,6 +89,18 @@ func dagStat(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment)
}

func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
// Determine whether to show progress based on TTY detection or explicit flag
var showProgress bool
val, specified := res.Request().Options[progressOptionName]
if !specified {
// Auto-detect: show progress only if stderr is a TTY
if errStat, err := os.Stderr.Stat(); err == nil {
showProgress = (errStat.Mode() & os.ModeCharDevice) != 0
}
} else {
showProgress = val.(bool)
}

var dagStats *DagStatSummary
for {
v, err := res.Next()

@@ -96,17 +113,26 @@ func finishCLIStat(res cmds.Response, re cmds.ResponseEmitter) error {
switch out := v.(type) {
case *DagStatSummary:
dagStats = out
if dagStats.Ratio == 0 {
length := len(dagStats.DagStatsArray)
if length > 0 {
currentStat := dagStats.DagStatsArray[length-1]
fmt.Fprintf(os.Stderr, "CID: %s, Size: %d, NumBlocks: %d\n", currentStat.Cid, currentStat.Size, currentStat.NumBlocks)
// Ratio == 0 means this is a progress update (not final result)
if showProgress && dagStats.Ratio == 0 {
// Sum up total progress across all DAGs being scanned
var totalBlocks int64
var totalSize uint64
for _, stat := range dagStats.DagStatsArray {
totalBlocks += stat.NumBlocks
totalSize += stat.Size
}
fmt.Fprintf(os.Stderr, "Fetched/Processed %d blocks, %d bytes (%s)\r", totalBlocks, totalSize, humanize.Bytes(totalSize))
}
default:
return e.TypeErr(out, v)

}
}

// Clear the progress line before final output
if showProgress {
fmt.Fprint(os.Stderr, "\033[2K\r")
}

return re.Emit(dagStats)
}
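The auto-detection above boils down to a single stdlib probe: stderr reporting the character-device mode bit means an interactive terminal, anything else means a pipe or file. A minimal standalone version of that check:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Same probe finishCLIStat uses: show progress on stderr only when it is a TTY.
	isTTY := false
	if st, err := os.Stderr.Stat(); err == nil {
		isTTY = st.Mode()&os.ModeCharDevice != 0
	}
	fmt.Println("show progress:", isTTY)
}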

@@ -56,7 +56,7 @@ var queryDhtCmd = &cmds.Command{
return err
}

if nd.DHTClient == nil {
if !nd.HasActiveDHTClient() {
return ErrNotDHT
}

@@ -70,7 +70,7 @@ var queryDhtCmd = &cmds.Command{
ctx, events := routing.RegisterForQueryEvents(ctx)

client := nd.DHTClient
if client == nd.DHT {
if nd.DHT != nil && client == nd.DHT {
client = nd.DHT.WAN
if !nd.DHT.WANActive() {
client = nd.DHT.LAN

@@ -1,7 +1,16 @@
package commands

import (
"encoding/hex"
"errors"
"fmt"
"io"

"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cmds "github.com/ipfs/go-ipfs-cmds"
oldcmds "github.com/ipfs/kubo/commands"
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
)

var DiagCmd = &cmds.Command{

@@ -10,8 +19,182 @@ var DiagCmd = &cmds.Command{
},

Subcommands: map[string]*cmds.Command{
"sys": sysDiagCmd,
"cmds": ActiveReqsCmd,
"profile": sysProfileCmd,
"sys": sysDiagCmd,
"cmds": ActiveReqsCmd,
"profile": sysProfileCmd,
"datastore": diagDatastoreCmd,
},
}

var diagDatastoreCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Low-level datastore inspection for debugging and testing.",
ShortDescription: `
'ipfs diag datastore' provides low-level access to the datastore for debugging
and testing purposes.

WARNING: FOR DEBUGGING/TESTING ONLY

These commands expose internal datastore details and should not be used
in production workflows. The datastore format may change between versions.

The daemon must not be running when calling these commands.

EXAMPLE

Inspecting pubsub seqno validator state:

$ ipfs diag datastore count /pubsub/seqno/
2
$ ipfs diag datastore get --hex /pubsub/seqno/12D3KooW...
Key: /pubsub/seqno/12D3KooW...
Hex Dump:
00000000 18 81 81 c8 91 c0 ea f6 |........|
`,
},
Subcommands: map[string]*cmds.Command{
"get": diagDatastoreGetCmd,
"count": diagDatastoreCountCmd,
},
}

const diagDatastoreHexOptionName = "hex"

type diagDatastoreGetResult struct {
Key string `json:"key"`
Value []byte `json:"value"`
HexDump string `json:"hex_dump,omitempty"`
}

var diagDatastoreGetCmd = &cmds.Command{
Status: cmds.Experimental,
Helptext: cmds.HelpText{
Tagline: "Read a raw key from the datastore.",
ShortDescription: `
Returns the value stored at the given datastore key.
Default output is raw bytes. Use --hex for human-readable hex dump.

The daemon must not be running when using this command.

WARNING: FOR DEBUGGING/TESTING ONLY
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, false, "Datastore key to read (e.g., /pubsub/seqno/<peerid>)"),
},
Options: []cmds.Option{
cmds.BoolOption(diagDatastoreHexOptionName, "Output hex dump instead of raw bytes"),
},
NoRemote: true,
PreRun: DaemonNotRunning,
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
cctx := env.(*oldcmds.Context)
repo, err := fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return fmt.Errorf("failed to open repo: %w", err)
}
defer repo.Close()

keyStr := req.Arguments[0]
key := datastore.NewKey(keyStr)
ds := repo.Datastore()

val, err := ds.Get(req.Context, key)
if err != nil {
if errors.Is(err, datastore.ErrNotFound) {
return fmt.Errorf("key not found: %s", keyStr)
}
return fmt.Errorf("failed to read key: %w", err)
}

result := &diagDatastoreGetResult{
Key: keyStr,
Value: val,
}

if hexDump, _ := req.Options[diagDatastoreHexOptionName].(bool); hexDump {
result.HexDump = hex.Dump(val)
}

return cmds.EmitOnce(res, result)
},
Type: diagDatastoreGetResult{},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *diagDatastoreGetResult) error {
if result.HexDump != "" {
fmt.Fprintf(w, "Key: %s\nHex Dump:\n%s", result.Key, result.HexDump)
return nil
}
// Raw bytes output
_, err := w.Write(result.Value)
return err
}),
},
}
|
||||
|
||||
type diagDatastoreCountResult struct {
|
||||
Prefix string `json:"prefix"`
|
||||
Count int64 `json:"count"`
|
||||
}
|
||||
|
||||
var diagDatastoreCountCmd = &cmds.Command{
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Count entries matching a datastore prefix.",
|
||||
ShortDescription: `
|
||||
Counts the number of datastore entries whose keys start with the given prefix.
|
||||
|
||||
The daemon must not be running when using this command.
|
||||
|
||||
WARNING: FOR DEBUGGING/TESTING ONLY
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("prefix", true, false, "Datastore key prefix (e.g., /pubsub/seqno/)"),
|
||||
},
|
||||
NoRemote: true,
|
||||
PreRun: DaemonNotRunning,
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
cctx := env.(*oldcmds.Context)
|
||||
repo, err := fsrepo.Open(cctx.ConfigRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open repo: %w", err)
|
||||
}
|
||||
defer repo.Close()
|
||||
|
||||
prefix := req.Arguments[0]
|
||||
ds := repo.Datastore()
|
||||
|
||||
q := query.Query{
|
||||
Prefix: prefix,
|
||||
KeysOnly: true,
|
||||
}
|
||||
|
||||
results, err := ds.Query(req.Context, q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query datastore: %w", err)
|
||||
}
|
||||
defer results.Close()
|
||||
|
||||
var count int64
|
||||
for result := range results.Next() {
|
||||
if result.Error != nil {
|
||||
return fmt.Errorf("query error: %w", result.Error)
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &diagDatastoreCountResult{
|
||||
Prefix: prefix,
|
||||
Count: count,
|
||||
})
|
||||
},
|
||||
Type: diagDatastoreCountResult{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *diagDatastoreCountResult) error {
|
||||
_, err := fmt.Fprintf(w, "%d\n", result.Count)
|
||||
return err
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
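For context on what the count subcommand does under the hood, the following is a small, self-contained sketch of counting keys under a prefix with go-datastore. It uses an in-memory MapDatastore rather than the repo's datastore, so the fsrepo and PreRun plumbing shown above is intentionally left out; everything else mirrors the KeysOnly query used by the command.

package main

import (
    "context"
    "fmt"

    datastore "github.com/ipfs/go-datastore"
    "github.com/ipfs/go-datastore/query"
    dssync "github.com/ipfs/go-datastore/sync"
)

// countPrefix counts entries whose keys fall under prefix, using a
// keys-only query so values are never loaded.
func countPrefix(ctx context.Context, ds datastore.Datastore, prefix string) (int64, error) {
    results, err := ds.Query(ctx, query.Query{Prefix: prefix, KeysOnly: true})
    if err != nil {
        return 0, err
    }
    defer results.Close()

    var count int64
    for r := range results.Next() {
        if r.Error != nil {
            return 0, r.Error
        }
        count++
    }
    return count, nil
}

func main() {
    ctx := context.Background()
    ds := dssync.MutexWrap(datastore.NewMapDatastore())
    _ = ds.Put(ctx, datastore.NewKey("/pubsub/seqno/peerA"), []byte{1})
    _ = ds.Put(ctx, datastore.NewKey("/pubsub/seqno/peerB"), []byte{2})

    n, err := countPrefix(ctx, ds, "/pubsub/seqno/")
    if err != nil {
        panic(err)
    }
    fmt.Println(n) // 2
}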
@@ -6,7 +6,7 @@ import (
)

// TypeErr returns an error with a string that explains what error was expected and what was received.
-func TypeErr(expected, actual interface{}) error {
+func TypeErr(expected, actual any) error {
    return fmt.Errorf("expected type %T, got %T", expected, actual)
}

@@ -1,6 +1,8 @@
package commands

-import cmds "github.com/ipfs/go-ipfs-cmds"
+import (
+    cmds "github.com/ipfs/go-ipfs-cmds"
+)

func CreateCmdExtras(opts ...func(e *cmds.Extra)) *cmds.Extra {
    e := new(cmds.Extra)
@@ -54,8 +56,8 @@ func GetPreemptsAutoUpdate(e *cmds.Extra) (val bool, found bool) {
    return getBoolFlag(e, preemptsAutoUpdate{})
}

-func getBoolFlag(e *cmds.Extra, key interface{}) (val bool, found bool) {
-    var ival interface{}
+func getBoolFlag(e *cmds.Extra, key any) (val bool, found bool) {
+    var ival any
    ival, found = e.GetValue(key)
    if !found {
        return false, false
@ -16,18 +16,24 @@ import (
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
oldcmds "github.com/ipfs/kubo/commands"
|
||||
"github.com/ipfs/kubo/config"
|
||||
"github.com/ipfs/kubo/core"
|
||||
"github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
"github.com/ipfs/kubo/core/node"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
|
||||
bservice "github.com/ipfs/boxo/blockservice"
|
||||
bstore "github.com/ipfs/boxo/blockstore"
|
||||
offline "github.com/ipfs/boxo/exchange/offline"
|
||||
dag "github.com/ipfs/boxo/ipld/merkledag"
|
||||
ft "github.com/ipfs/boxo/ipld/unixfs"
|
||||
uio "github.com/ipfs/boxo/ipld/unixfs/io"
|
||||
mfs "github.com/ipfs/boxo/mfs"
|
||||
"github.com/ipfs/boxo/path"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cidenc "github.com/ipfs/go-cidutil/cidenc"
|
||||
"github.com/ipfs/go-datastore"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
@ -120,18 +126,19 @@ performance.`,
|
||||
cmds.BoolOption(filesFlushOptionName, "f", "Flush target and ancestors after write.").WithDefault(true),
|
||||
},
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
"read": filesReadCmd,
|
||||
"write": filesWriteCmd,
|
||||
"mv": filesMvCmd,
|
||||
"cp": filesCpCmd,
|
||||
"ls": filesLsCmd,
|
||||
"mkdir": filesMkdirCmd,
|
||||
"stat": filesStatCmd,
|
||||
"rm": filesRmCmd,
|
||||
"flush": filesFlushCmd,
|
||||
"chcid": filesChcidCmd,
|
||||
"chmod": filesChmodCmd,
|
||||
"touch": filesTouchCmd,
|
||||
"read": filesReadCmd,
|
||||
"write": filesWriteCmd,
|
||||
"mv": filesMvCmd,
|
||||
"cp": filesCpCmd,
|
||||
"ls": filesLsCmd,
|
||||
"mkdir": filesMkdirCmd,
|
||||
"stat": filesStatCmd,
|
||||
"rm": filesRmCmd,
|
||||
"flush": filesFlushCmd,
|
||||
"chcid": filesChcidCmd,
|
||||
"chmod": filesChmodCmd,
|
||||
"chroot": filesChrootCmd,
|
||||
"touch": filesTouchCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -493,7 +500,12 @@ being GC'ed.
|
||||
return err
|
||||
}
|
||||
|
||||
prefix, err := getPrefixNew(req)
|
||||
cfg, err := nd.Repo.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
prefix, err := getPrefixNew(req, &cfg.Import)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -544,7 +556,9 @@ being GC'ed.
|
||||
|
||||
mkParents, _ := req.Options[filesParentsOptionName].(bool)
|
||||
if mkParents {
|
||||
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix)
|
||||
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
|
||||
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
|
||||
err := ensureContainingDirectoryExists(nd.FilesRoot, dst, prefix, maxDirLinks, &sizeEstimationMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -983,9 +997,13 @@ stat' on the file or any of its ancestors.
WARNING:

The CID produced by 'files write' will be different from 'ipfs add' because
-'ipfs file write' creates a trickle-dag optimized for append-only operations
+'ipfs files write' creates a trickle-dag optimized for append-only operations.
See '--trickle' in 'ipfs add --help' for more information.

+NOTE: The 'Import.UnixFSFileMaxLinks' config option does not apply to this command.
+Trickle DAG has a fixed internal structure optimized for append operations.
+To use configurable max-links, use 'ipfs add' with balanced DAG layout.
+
If you want to add a file without modifying an existing one,
use 'ipfs add' with '--to-files':

@ -1042,7 +1060,7 @@ See '--to-files' in 'ipfs add --help' for more information.
|
||||
rawLeaves = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
|
||||
}
|
||||
|
||||
prefix, err := getPrefixNew(req)
|
||||
prefix, err := getPrefixNew(req, &cfg.Import)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1053,7 +1071,9 @@ See '--to-files' in 'ipfs add --help' for more information.
|
||||
}
|
||||
|
||||
if mkParents {
|
||||
err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix)
|
||||
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
|
||||
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
|
||||
err := ensureContainingDirectoryExists(nd.FilesRoot, path, prefix, maxDirLinks, &sizeEstimationMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1157,6 +1177,11 @@ Examples:
|
||||
return err
|
||||
}
|
||||
|
||||
cfg, err := n.Repo.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dashp, _ := req.Options[filesParentsOptionName].(bool)
|
||||
dirtomake, err := checkPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
@ -1169,16 +1194,21 @@ Examples:
|
||||
return err
|
||||
}
|
||||
|
||||
prefix, err := getPrefix(req)
|
||||
prefix, err := getPrefix(req, &cfg.Import)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root := n.FilesRoot
|
||||
|
||||
maxDirLinks := int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
|
||||
sizeEstimationMode := cfg.Import.HAMTSizeEstimationMode()
|
||||
|
||||
err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{
|
||||
Mkparents: dashp,
|
||||
Flush: flush,
|
||||
CidBuilder: prefix,
|
||||
Mkparents: dashp,
|
||||
Flush: flush,
|
||||
CidBuilder: prefix,
|
||||
MaxLinks: maxDirLinks,
|
||||
SizeEstimationMode: &sizeEstimationMode,
|
||||
})
|
||||
|
||||
return err
|
||||
@ -1256,7 +1286,9 @@ Change the CID version or hash function of the root node of a given path.
|
||||
|
||||
flush, _ := req.Options[filesFlushOptionName].(bool)
|
||||
|
||||
prefix, err := getPrefix(req)
|
||||
// Note: files chcid is for explicitly changing CID format, so we don't
|
||||
// fall back to Import config here. If no options are provided, it does nothing.
|
||||
prefix, err := getPrefix(req, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1414,10 +1446,20 @@ func removePath(filesRoot *mfs.Root, path string, force bool, dashr bool) error
|
||||
return pdir.Flush()
|
||||
}
|
||||
|
||||
func getPrefixNew(req *cmds.Request) (cid.Builder, error) {
|
||||
func getPrefixNew(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) {
|
||||
cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int)
|
||||
hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string)
|
||||
|
||||
// Fall back to Import config if CLI options not set
|
||||
if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() {
|
||||
cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion))
|
||||
cidVerSet = true
|
||||
}
|
||||
if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() {
|
||||
hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction)
|
||||
hashFunSet = true
|
||||
}
|
||||
|
||||
if !cidVerSet && !hashFunSet {
|
||||
return nil, nil
|
||||
}
|
||||
@ -1443,10 +1485,20 @@ func getPrefixNew(req *cmds.Request) (cid.Builder, error) {
|
||||
return &prefix, nil
|
||||
}
|
||||
|
||||
func getPrefix(req *cmds.Request) (cid.Builder, error) {
|
||||
func getPrefix(req *cmds.Request, importCfg *config.Import) (cid.Builder, error) {
|
||||
cidVer, cidVerSet := req.Options[filesCidVersionOptionName].(int)
|
||||
hashFunStr, hashFunSet := req.Options[filesHashOptionName].(string)
|
||||
|
||||
// Fall back to Import config if CLI options not set
|
||||
if !cidVerSet && importCfg != nil && !importCfg.CidVersion.IsDefault() {
|
||||
cidVer = int(importCfg.CidVersion.WithDefault(config.DefaultCidVersion))
|
||||
cidVerSet = true
|
||||
}
|
||||
if !hashFunSet && importCfg != nil && !importCfg.HashFunction.IsDefault() {
|
||||
hashFunStr = importCfg.HashFunction.WithDefault(config.DefaultHashFunction)
|
||||
hashFunSet = true
|
||||
}
|
||||
|
||||
if !cidVerSet && !hashFunSet {
|
||||
return nil, nil
|
||||
}
|
||||
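The middle of getPrefix/getPrefixNew (elided by the hunks above) turns the resolved CID version and hash name into the cid.Builder that is returned as &prefix. A hedged sketch of how such a prefix can be assembled with go-cid and go-multihash; the helper name buildPrefix is illustrative, not Kubo's, and error handling is simplified:

package main

import (
    "fmt"

    cid "github.com/ipfs/go-cid"
    mh "github.com/multiformats/go-multihash"
)

// buildPrefix turns a CID version and a multihash name (e.g. "sha2-256")
// into a cid.Prefix usable as a cid.Builder.
func buildPrefix(cidVer int, hashFunStr string) (cid.Builder, error) {
    if cidVer != 0 && cidVer != 1 {
        return nil, fmt.Errorf("invalid cid version: %d", cidVer)
    }
    hashCode, ok := mh.Names[hashFunStr]
    if !ok {
        return nil, fmt.Errorf("unrecognized hash function: %s", hashFunStr)
    }
    prefix := cid.Prefix{
        Version:  uint64(cidVer),
        Codec:    cid.DagProtobuf,
        MhType:   hashCode,
        MhLength: -1, // default digest length for the chosen hash
    }
    return &prefix, nil
}

func main() {
    b, err := buildPrefix(1, "sha2-256")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%#v\n", b)
}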
@ -1472,7 +1524,7 @@ func getPrefix(req *cmds.Request) (cid.Builder, error) {
|
||||
return &prefix, nil
|
||||
}
|
||||
|
||||
func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder) error {
|
||||
func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Builder, maxLinks int, sizeEstimationMode *uio.SizeEstimationMode) error {
|
||||
dirtomake := gopath.Dir(path)
|
||||
|
||||
if dirtomake == "/" {
|
||||
@ -1480,8 +1532,10 @@ func ensureContainingDirectoryExists(r *mfs.Root, path string, builder cid.Build
|
||||
}
|
||||
|
||||
return mfs.Mkdir(r, dirtomake, mfs.MkdirOpts{
|
||||
Mkparents: true,
|
||||
CidBuilder: builder,
|
||||
Mkparents: true,
|
||||
CidBuilder: builder,
|
||||
MaxLinks: maxLinks,
|
||||
SizeEstimationMode: sizeEstimationMode,
|
||||
})
|
||||
}
|
||||
|
||||
@ -1648,3 +1702,141 @@ Examples:
|
||||
return mfs.Touch(nd.FilesRoot, path, ts)
|
||||
},
|
||||
}
|
||||
|
||||
const chrootConfirmOptionName = "confirm"

var filesChrootCmd = &cmds.Command{
    Status: cmds.Experimental,
    Helptext: cmds.HelpText{
        Tagline: "Change the MFS root CID.",
        ShortDescription: `
'ipfs files chroot' changes the root CID used by MFS (Mutable File System).
This is a recovery command for when MFS becomes corrupted and prevents the
daemon from starting.

When run without a CID argument, it resets MFS to an empty directory.

WARNING: The old MFS root and its unpinned children will be removed during
the next garbage collection. Pin the old root first if you want to preserve it.

This command can only be used while the daemon is not running.

Examples:

  # Reset MFS to empty directory (recovery from corruption)
  $ ipfs files chroot --confirm

  # Restore MFS to a known good directory CID
  $ ipfs files chroot --confirm QmYourBackupCID
`,
    },
    Arguments: []cmds.Argument{
        cmds.StringArg("cid", false, false, "New root CID (defaults to empty directory if not specified)."),
    },
    Options: []cmds.Option{
        cmds.BoolOption(chrootConfirmOptionName, "Confirm this potentially destructive operation."),
    },
    NoRemote: true,
    Extra:    CreateCmdExtras(SetDoesNotUseRepo(true)),
    Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
        confirm, _ := req.Options[chrootConfirmOptionName].(bool)
        if !confirm {
            return errors.New("this is a potentially destructive operation; pass --confirm to proceed")
        }

// Determine new root CID
|
||||
var newRootCid cid.Cid
|
||||
if len(req.Arguments) > 0 {
|
||||
var err error
|
||||
newRootCid, err = cid.Decode(req.Arguments[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid CID %q: %w", req.Arguments[0], err)
|
||||
}
|
||||
} else {
|
||||
// Default to empty directory
|
||||
newRootCid = ft.EmptyDirNode().Cid()
|
||||
}
|
||||
|
||||
// Get config root to open repo directly
|
||||
cctx := env.(*oldcmds.Context)
|
||||
cfgRoot := cctx.ConfigRoot
|
||||
|
||||
// Open repo directly (daemon must not be running)
|
||||
repo, err := fsrepo.Open(cfgRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening repo (is the daemon running?): %w", err)
|
||||
}
|
||||
defer repo.Close()
|
||||
|
||||
localDS := repo.Datastore()
|
||||
bs := bstore.NewBlockstore(localDS)
|
||||
|
||||
// Check new root exists locally and is a directory
|
||||
hasBlock, err := bs.Has(req.Context, newRootCid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking if new root exists: %w", err)
|
||||
}
|
||||
if !hasBlock {
|
||||
// Special case: empty dir is always available (hardcoded in boxo)
|
||||
emptyDirCid := ft.EmptyDirNode().Cid()
|
||||
if !newRootCid.Equals(emptyDirCid) {
|
||||
return fmt.Errorf("new root %s does not exist locally; fetch it first with 'ipfs block get'", newRootCid)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate it's a directory (not a file)
|
||||
if hasBlock {
|
||||
blk, err := bs.Get(req.Context, newRootCid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading new root block: %w", err)
|
||||
}
|
||||
pbNode, err := dag.DecodeProtobuf(blk.RawData())
|
||||
if err != nil {
|
||||
return fmt.Errorf("new root is not a valid dag-pb node: %w", err)
|
||||
}
|
||||
fsNode, err := ft.FSNodeFromBytes(pbNode.Data())
|
||||
if err != nil {
|
||||
return fmt.Errorf("new root is not a valid UnixFS node: %w", err)
|
||||
}
|
||||
if fsNode.Type() != ft.TDirectory && fsNode.Type() != ft.THAMTShard {
|
||||
return fmt.Errorf("new root must be a directory, got %s", fsNode.Type())
|
||||
}
|
||||
}
|
||||
|
||||
// Get old root for display (if exists)
|
||||
var oldRootStr string
|
||||
oldRootBytes, err := localDS.Get(req.Context, node.FilesRootDatastoreKey)
|
||||
if err == nil {
|
||||
oldRootCid, err := cid.Cast(oldRootBytes)
|
||||
if err == nil {
|
||||
oldRootStr = oldRootCid.String()
|
||||
}
|
||||
} else if !errors.Is(err, datastore.ErrNotFound) {
|
||||
return fmt.Errorf("reading current MFS root: %w", err)
|
||||
}
|
||||
|
||||
// Write new root
|
||||
err = localDS.Put(req.Context, node.FilesRootDatastoreKey, newRootCid.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing new MFS root: %w", err)
|
||||
}
|
||||
|
||||
// Build output message
|
||||
var msg string
|
||||
if oldRootStr != "" {
|
||||
msg = fmt.Sprintf("MFS root changed from %s to %s\n", oldRootStr, newRootCid)
|
||||
msg += fmt.Sprintf("The old root %s will be garbage collected unless pinned.\n", oldRootStr)
|
||||
} else {
|
||||
msg = fmt.Sprintf("MFS root set to %s\n", newRootCid)
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &MessageOutput{Message: msg})
|
||||
},
|
||||
Type: MessageOutput{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *MessageOutput) error {
|
||||
_, err := fmt.Fprint(w, out.Message)
|
||||
return err
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
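When no CID argument is given, 'files chroot' falls back to the canonical empty UnixFS directory, as the Run function above does via ft.EmptyDirNode().Cid(). A small hedged sketch of how a caller could compute that default and otherwise parse a user-supplied CID; the helper name is illustrative, and the package aliases match the imports used in this diff:

package main

import (
    "fmt"

    ft "github.com/ipfs/boxo/ipld/unixfs"
    cid "github.com/ipfs/go-cid"
)

// defaultOrParsed resolves the root CID the same way chroot does:
// empty argument means the hardcoded empty UnixFS directory.
func defaultOrParsed(arg string) (cid.Cid, error) {
    if arg == "" {
        return ft.EmptyDirNode().Cid(), nil
    }
    return cid.Decode(arg)
}

func main() {
    c, err := defaultOrParsed("")
    if err != nil {
        panic(err)
    }
    fmt.Println("default MFS root:", c) // the empty-directory CID
}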
@ -1,7 +1,6 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
@ -12,8 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func TestFilesCp_DagCborNodeFails(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := t.Context()
|
||||
|
||||
cmdCtx, err := coremock.MockCmdsCtx()
|
||||
require.NoError(t, err)
|
||||
@ -32,7 +30,7 @@ func TestFilesCp_DagCborNodeFails(t *testing.T) {
|
||||
"/ipfs/" + protoNode.Cid().String(),
|
||||
"/test-destination",
|
||||
},
|
||||
Options: map[string]interface{}{
|
||||
Options: map[string]any{
|
||||
"force": false,
|
||||
},
|
||||
}
|
||||
|
||||
@ -85,7 +85,7 @@ The output is:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return streamResult(func(v interface{}, out io.Writer) nonFatalError {
|
||||
return streamResult(func(v any, out io.Writer) nonFatalError {
|
||||
r := v.(*filestore.ListRes)
|
||||
if r.ErrorMsg != "" {
|
||||
return nonFatalError(r.ErrorMsg)
|
||||
|
||||
@ -45,6 +45,9 @@ To output a TAR archive instead of unpacked files, use '--archive' or '-a'.
|
||||
To compress the output with GZIP compression, use '--compress' or '-C'. You
|
||||
may also specify the level of compression by specifying '-l=<1-9>'.
|
||||
`,
|
||||
HTTP: &cmds.HTTPHelpText{
|
||||
ResponseContentType: "application/x-tar, or application/gzip when compress=true",
|
||||
},
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
@ -103,6 +106,16 @@ may also specify the level of compression by specifying '-l=<1-9>'.
|
||||
reader.Close()
|
||||
}()
|
||||
|
||||
// Set Content-Type based on output format.
|
||||
// When compression is enabled, output is gzip (or tar.gz for directories).
|
||||
// Otherwise, tar is used as the transport format.
|
||||
res.SetEncodingType(cmds.OctetStream)
|
||||
if cmplvl != gzip.NoCompression {
|
||||
res.SetContentType("application/gzip")
|
||||
} else {
|
||||
res.SetContentType("application/x-tar")
|
||||
}
|
||||
|
||||
return res.Emit(reader)
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
@ -16,7 +15,7 @@ func TestGetOutputPath(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
args: []string{"/ipns/multiformats.io/"},
|
||||
opts: map[string]interface{}{
|
||||
opts: map[string]any{
|
||||
"output": "takes-precedence",
|
||||
},
|
||||
outPath: "takes-precedence",
|
||||
@ -52,8 +51,7 @@ func TestGetOutputPath(t *testing.T) {
|
||||
|
||||
for i, tc := range cases {
|
||||
t.Run(fmt.Sprintf("%s-%d", t.Name(), i), func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := t.Context()
|
||||
|
||||
req, err := cmds.NewRequest(ctx, []string{}, tc.opts, tc.args, nil, GetCmd)
|
||||
if err != nil {
|
||||
|
||||
@ -146,7 +146,7 @@ EXAMPLE:
|
||||
Type: IdOutput{},
|
||||
}
|
||||
|
||||
func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{}, error) {
|
||||
func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (any, error) {
|
||||
if p == "" {
|
||||
return nil, errors.New("attempted to print nil peer")
|
||||
}
|
||||
@ -189,7 +189,7 @@ func printPeer(keyEnc ke.KeyEncoder, ps pstore.Peerstore, p peer.ID) (interface{
|
||||
}
|
||||
|
||||
// printing self is special cased as we get values differently.
|
||||
func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (interface{}, error) {
|
||||
func printSelf(keyEnc ke.KeyEncoder, node *core.IpfsNode) (any, error) {
|
||||
info := new(IdOutput)
|
||||
info.ID = keyEnc.FormatID(node.Identity)
|
||||
|
||||
|
||||
@@ -38,9 +38,9 @@ publish'.
  > ipfs key gen --type=rsa --size=2048 mykey
  > ipfs name publish --key=mykey QmSomeHash

-'ipfs key list' lists the available keys.
+'ipfs key ls' lists the available keys.

-  > ipfs key list
+  > ipfs key ls
  self
  mykey
`,
@@ -49,7 +49,8 @@ publish'.
    "gen":    keyGenCmd,
    "export": keyExportCmd,
    "import": keyImportCmd,
-    "list":   keyListCmd,
+    "list":   keyListDeprecatedCmd,
+    "ls":     keyListCmd,
    "rename": keyRenameCmd,
    "rm":     keyRmCmd,
    "rotate": keyRotateCmd,
@ -458,7 +459,7 @@ var keyListCmd = &cmds.Command{
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
keyEnc, err := ke.KeyEncoderFromString(req.Options[ke.OptionIPNSBase.Name()].(string))
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("cannot get key encoder: %w", err)
|
||||
}
|
||||
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
@ -468,7 +469,7 @@ var keyListCmd = &cmds.Command{
|
||||
|
||||
keys, err := api.Key().List(req.Context)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("listing keys failed: %w", err)
|
||||
}
|
||||
|
||||
list := make([]KeyOutput, 0, len(keys))
|
||||
@ -488,6 +489,17 @@ var keyListCmd = &cmds.Command{
|
||||
Type: KeyOutputList{},
|
||||
}
|
||||
|
||||
var keyListDeprecatedCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated: use 'ipfs key ls' instead.",
|
||||
},
|
||||
Options: keyListCmd.Options,
|
||||
Run: keyListCmd.Run,
|
||||
Encoders: keyListCmd.Encoders,
|
||||
Type: keyListCmd.Type,
|
||||
}
|
||||
|
||||
const (
|
||||
keyStoreForceOptionName = "force"
|
||||
)
|
||||
@ -773,7 +785,7 @@ the signed payload is always prefixed with "libp2p-key signed message:".
|
||||
`,
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption("key", "k", "The name of the key to use for signing."),
|
||||
cmds.StringOption("key", "k", "The name of the key to use for verifying."),
|
||||
cmds.StringOption("signature", "s", "Multibase-encoded signature to verify."),
|
||||
ke.OptionIPNSBase,
|
||||
},
|
||||
|
||||
@@ -48,6 +48,7 @@ const (
    lsResolveTypeOptionName = "resolve-type"
    lsSizeOptionName        = "size"
    lsStreamOptionName      = "stream"
+   lsLongOptionName        = "long"
)

var LsCmd = &cmds.Command{
@@ -57,7 +58,26 @@ var LsCmd = &cmds.Command{
Displays the contents of an IPFS or IPNS object(s) at the given path, with
the following format:

-  <link base58 hash> <link size in bytes> <link name>
+  <cid> <size> <name>

+With the --long (-l) option, display optional file mode (permissions) and
+modification time in a format similar to Unix 'ls -l':
+
+  <mode> <cid> <size> <mtime> <name>
+
+Mode and mtime are optional UnixFS metadata. They are only present if the
+content was imported with 'ipfs add --preserve-mode' and '--preserve-mtime'.
+Without preserved metadata, both mode and mtime display '-'. Times are in UTC.
+
+Example with --long and preserved metadata:
+
+  -rw-r--r-- QmZULkCELmmk5XNf... 1234 Jan 15 10:30 document.txt
+  -rwxr-xr-x QmaRGe7bVmVaLmxb... 5678 Dec 01  2023 script.sh
+  drwxr-xr-x QmWWEQhcLufF3qPm... -    Nov 20  2023 subdir/
+
+Example with --long without preserved metadata:
+
+  -          QmZULkCELmmk5XNf... 1234 -            document.txt
+
The JSON output contains type information.
`,
@ -71,6 +91,7 @@ The JSON output contains type information.
|
||||
cmds.BoolOption(lsResolveTypeOptionName, "Resolve linked objects to find out their types.").WithDefault(true),
|
||||
cmds.BoolOption(lsSizeOptionName, "Resolve linked objects to find out their file size.").WithDefault(true),
|
||||
cmds.BoolOption(lsStreamOptionName, "s", "Enable experimental streaming of directory entries as they are traversed."),
|
||||
cmds.BoolOption(lsLongOptionName, "l", "Use a long listing format, showing file mode and modification time."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
@ -215,10 +236,121 @@ The JSON output contains type information.
|
||||
Type: LsOutput{},
|
||||
}
|
||||
|
||||
// formatMode converts os.FileMode to a 10-character Unix ls-style string.
|
||||
//
|
||||
// Format: [type][owner rwx][group rwx][other rwx]
|
||||
//
|
||||
// Type indicators: - (regular), d (directory), l (symlink), p (named pipe),
|
||||
// s (socket), c (char device), b (block device).
|
||||
//
|
||||
// Special bits replace the execute position: setuid on owner (s/S),
|
||||
// setgid on group (s/S), sticky on other (t/T). Lowercase when the
|
||||
// underlying execute bit is also set, uppercase when not.
|
||||
func formatMode(mode os.FileMode) string {
|
||||
var buf [10]byte
|
||||
|
||||
// File type - handle all special file types like ls does
|
||||
switch {
|
||||
case mode&os.ModeDir != 0:
|
||||
buf[0] = 'd'
|
||||
case mode&os.ModeSymlink != 0:
|
||||
buf[0] = 'l'
|
||||
case mode&os.ModeNamedPipe != 0:
|
||||
buf[0] = 'p'
|
||||
case mode&os.ModeSocket != 0:
|
||||
buf[0] = 's'
|
||||
case mode&os.ModeDevice != 0:
|
||||
if mode&os.ModeCharDevice != 0 {
|
||||
buf[0] = 'c'
|
||||
} else {
|
||||
buf[0] = 'b'
|
||||
}
|
||||
default:
|
||||
buf[0] = '-'
|
||||
}
|
||||
|
||||
// Owner permissions (bits 8,7,6)
|
||||
buf[1] = permBit(mode, 0400, 'r') // read
|
||||
buf[2] = permBit(mode, 0200, 'w') // write
|
||||
// Handle setuid bit for owner execute
|
||||
if mode&os.ModeSetuid != 0 {
|
||||
if mode&0100 != 0 {
|
||||
buf[3] = 's'
|
||||
} else {
|
||||
buf[3] = 'S'
|
||||
}
|
||||
} else {
|
||||
buf[3] = permBit(mode, 0100, 'x') // execute
|
||||
}
|
||||
|
||||
// Group permissions (bits 5,4,3)
|
||||
buf[4] = permBit(mode, 0040, 'r') // read
|
||||
buf[5] = permBit(mode, 0020, 'w') // write
|
||||
// Handle setgid bit for group execute
|
||||
if mode&os.ModeSetgid != 0 {
|
||||
if mode&0010 != 0 {
|
||||
buf[6] = 's'
|
||||
} else {
|
||||
buf[6] = 'S'
|
||||
}
|
||||
} else {
|
||||
buf[6] = permBit(mode, 0010, 'x') // execute
|
||||
}
|
||||
|
||||
// Other permissions (bits 2,1,0)
|
||||
buf[7] = permBit(mode, 0004, 'r') // read
|
||||
buf[8] = permBit(mode, 0002, 'w') // write
|
||||
// Handle sticky bit for other execute
|
||||
if mode&os.ModeSticky != 0 {
|
||||
if mode&0001 != 0 {
|
||||
buf[9] = 't'
|
||||
} else {
|
||||
buf[9] = 'T'
|
||||
}
|
||||
} else {
|
||||
buf[9] = permBit(mode, 0001, 'x') // execute
|
||||
}
|
||||
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
// permBit returns the permission character if the bit is set.
|
||||
func permBit(mode os.FileMode, bit os.FileMode, char byte) byte {
|
||||
if mode&bit != 0 {
|
||||
return char
|
||||
}
|
||||
return '-'
|
||||
}
|
||||
|
||||
// formatModTime formats time.Time for display, following Unix ls conventions.
|
||||
//
|
||||
// Returns "-" for zero time. Otherwise returns a 12-character string:
|
||||
// recent files (within 6 months) show "Jan 02 15:04",
|
||||
// older or future files show "Jan 02 2006".
|
||||
//
|
||||
// The output uses the timezone embedded in t (UTC for IPFS metadata).
|
||||
func formatModTime(t time.Time) string {
|
||||
if t.IsZero() {
|
||||
return "-"
|
||||
}
|
||||
|
||||
// Format: "Jan 02 15:04" for times within the last 6 months
|
||||
// Format: "Jan 02 2006" for older times (similar to ls)
|
||||
now := time.Now()
|
||||
sixMonthsAgo := now.AddDate(0, -6, 0)
|
||||
|
||||
if t.After(sixMonthsAgo) && t.Before(now.Add(24*time.Hour)) {
|
||||
return t.Format("Jan 02 15:04")
|
||||
}
|
||||
return t.Format("Jan 02 2006")
|
||||
}
|
||||
|
||||
func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash string, ignoreBreaks bool) string {
|
||||
headers, _ := req.Options[lsHeadersOptionNameTime].(bool)
|
||||
stream, _ := req.Options[lsStreamOptionName].(bool)
|
||||
size, _ := req.Options[lsSizeOptionName].(bool)
|
||||
long, _ := req.Options[lsLongOptionName].(bool)
|
||||
|
||||
// in streaming mode we can't automatically align the tabs
|
||||
// so we take a best guess
|
||||
var minTabWidth int
|
||||
@ -242,9 +374,21 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash
|
||||
fmt.Fprintf(tw, "%s:\n", object.Hash)
|
||||
}
|
||||
if headers {
|
||||
s := "Hash\tName"
|
||||
if size {
|
||||
s = "Hash\tSize\tName"
|
||||
var s string
|
||||
if long {
|
||||
// Long format: Mode Hash [Size] ModTime Name
|
||||
if size {
|
||||
s = "Mode\tHash\tSize\tModTime\tName"
|
||||
} else {
|
||||
s = "Mode\tHash\tModTime\tName"
|
||||
}
|
||||
} else {
|
||||
// Standard format: Hash [Size] Name
|
||||
if size {
|
||||
s = "Hash\tSize\tName"
|
||||
} else {
|
||||
s = "Hash\tName"
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(tw, s)
|
||||
}
|
||||
@ -253,23 +397,54 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash
|
||||
|
||||
for _, link := range object.Links {
|
||||
var s string
|
||||
switch link.Type {
|
||||
case unixfs.TDirectory, unixfs.THAMTShard, unixfs.TMetadata:
|
||||
if size {
|
||||
s = "%[1]s\t-\t%[3]s/\n"
|
||||
} else {
|
||||
s = "%[1]s\t%[3]s/\n"
|
||||
}
|
||||
default:
|
||||
if size {
|
||||
s = "%s\t%v\t%s\n"
|
||||
} else {
|
||||
s = "%[1]s\t%[3]s\n"
|
||||
}
|
||||
}
|
||||
isDir := link.Type == unixfs.TDirectory || link.Type == unixfs.THAMTShard || link.Type == unixfs.TMetadata
|
||||
|
||||
// TODO: Print link.Mode and link.ModTime?
|
||||
fmt.Fprintf(tw, s, link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
|
||||
if long {
|
||||
// Long format: Mode Hash Size ModTime Name
|
||||
var mode string
|
||||
if link.Mode == 0 {
|
||||
// No mode metadata preserved. Show "-" to indicate
|
||||
// "not available" rather than "----------" (mode 0000).
|
||||
mode = "-"
|
||||
} else {
|
||||
mode = formatMode(link.Mode)
|
||||
}
|
||||
modTime := formatModTime(link.ModTime)
|
||||
|
||||
if isDir {
|
||||
if size {
|
||||
s = "%s\t%s\t-\t%s\t%s/\n"
|
||||
} else {
|
||||
s = "%s\t%s\t%s\t%s/\n"
|
||||
}
|
||||
fmt.Fprintf(tw, s, mode, link.Hash, modTime, cmdenv.EscNonPrint(link.Name))
|
||||
} else {
|
||||
if size {
|
||||
s = "%s\t%s\t%v\t%s\t%s\n"
|
||||
fmt.Fprintf(tw, s, mode, link.Hash, link.Size, modTime, cmdenv.EscNonPrint(link.Name))
|
||||
} else {
|
||||
s = "%s\t%s\t%s\t%s\n"
|
||||
fmt.Fprintf(tw, s, mode, link.Hash, modTime, cmdenv.EscNonPrint(link.Name))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Standard format: Hash [Size] Name
|
||||
switch {
|
||||
case isDir:
|
||||
if size {
|
||||
s = "%[1]s\t-\t%[3]s/\n"
|
||||
} else {
|
||||
s = "%[1]s\t%[3]s/\n"
|
||||
}
|
||||
default:
|
||||
if size {
|
||||
s = "%s\t%v\t%s\n"
|
||||
} else {
|
||||
s = "%[1]s\t%[3]s\n"
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(tw, s, link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
tw.Flush()
|
||||
|
||||
189
core/commands/ls_test.go
Normal file
189
core/commands/ls_test.go
Normal file
@ -0,0 +1,189 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFormatMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
mode os.FileMode
|
||||
expected string
|
||||
}{
|
||||
// File types
|
||||
{
|
||||
name: "regular file with rw-r--r--",
|
||||
mode: 0644,
|
||||
expected: "-rw-r--r--",
|
||||
},
|
||||
{
|
||||
name: "regular file with rwxr-xr-x",
|
||||
mode: 0755,
|
||||
expected: "-rwxr-xr-x",
|
||||
},
|
||||
{
|
||||
name: "regular file with no permissions",
|
||||
mode: 0,
|
||||
expected: "----------",
|
||||
},
|
||||
{
|
||||
name: "regular file with full permissions",
|
||||
mode: 0777,
|
||||
expected: "-rwxrwxrwx",
|
||||
},
|
||||
{
|
||||
name: "directory with rwxr-xr-x",
|
||||
mode: os.ModeDir | 0755,
|
||||
expected: "drwxr-xr-x",
|
||||
},
|
||||
{
|
||||
name: "directory with rwx------",
|
||||
mode: os.ModeDir | 0700,
|
||||
expected: "drwx------",
|
||||
},
|
||||
{
|
||||
name: "symlink with rwxrwxrwx",
|
||||
mode: os.ModeSymlink | 0777,
|
||||
expected: "lrwxrwxrwx",
|
||||
},
|
||||
{
|
||||
name: "named pipe with rw-r--r--",
|
||||
mode: os.ModeNamedPipe | 0644,
|
||||
expected: "prw-r--r--",
|
||||
},
|
||||
{
|
||||
name: "socket with rw-rw-rw-",
|
||||
mode: os.ModeSocket | 0666,
|
||||
expected: "srw-rw-rw-",
|
||||
},
|
||||
{
|
||||
name: "block device with rw-rw----",
|
||||
mode: os.ModeDevice | 0660,
|
||||
expected: "brw-rw----",
|
||||
},
|
||||
{
|
||||
name: "character device with rw-rw-rw-",
|
||||
mode: os.ModeDevice | os.ModeCharDevice | 0666,
|
||||
expected: "crw-rw-rw-",
|
||||
},
|
||||
|
||||
// Special permission bits - setuid
|
||||
{
|
||||
name: "setuid with execute",
|
||||
mode: os.ModeSetuid | 0755,
|
||||
expected: "-rwsr-xr-x",
|
||||
},
|
||||
{
|
||||
name: "setuid without execute",
|
||||
mode: os.ModeSetuid | 0644,
|
||||
expected: "-rwSr--r--",
|
||||
},
|
||||
|
||||
// Special permission bits - setgid
|
||||
{
|
||||
name: "setgid with execute",
|
||||
mode: os.ModeSetgid | 0755,
|
||||
expected: "-rwxr-sr-x",
|
||||
},
|
||||
{
|
||||
name: "setgid without execute",
|
||||
mode: os.ModeSetgid | 0745,
|
||||
expected: "-rwxr-Sr-x",
|
||||
},
|
||||
|
||||
// Special permission bits - sticky
|
||||
{
|
||||
name: "sticky with execute",
|
||||
mode: os.ModeSticky | 0755,
|
||||
expected: "-rwxr-xr-t",
|
||||
},
|
||||
{
|
||||
name: "sticky without execute",
|
||||
mode: os.ModeSticky | 0754,
|
||||
expected: "-rwxr-xr-T",
|
||||
},
|
||||
|
||||
// Combined special bits
|
||||
{
|
||||
name: "setuid + setgid + sticky all with execute",
|
||||
mode: os.ModeSetuid | os.ModeSetgid | os.ModeSticky | 0777,
|
||||
expected: "-rwsrwsrwt",
|
||||
},
|
||||
{
|
||||
name: "setuid + setgid + sticky none with execute",
|
||||
mode: os.ModeSetuid | os.ModeSetgid | os.ModeSticky | 0666,
|
||||
expected: "-rwSrwSrwT",
|
||||
},
|
||||
|
||||
// Directory with special bits
|
||||
{
|
||||
name: "directory with sticky bit",
|
||||
mode: os.ModeDir | os.ModeSticky | 0755,
|
||||
expected: "drwxr-xr-t",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
result := formatMode(tc.mode)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatModTime(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("zero time returns dash", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
result := formatModTime(time.Time{})
|
||||
assert.Equal(t, "-", result)
|
||||
})
|
||||
|
||||
t.Run("old time shows year format", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Use a time clearly in the past (more than 6 months ago)
|
||||
oldTime := time.Date(2020, time.March, 15, 10, 30, 0, 0, time.UTC)
|
||||
result := formatModTime(oldTime)
|
||||
// Format: "Jan 02 2006" (note: two spaces before year)
|
||||
assert.Equal(t, "Mar 15 2020", result)
|
||||
})
|
||||
|
||||
t.Run("very old time shows year format", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
veryOldTime := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
result := formatModTime(veryOldTime)
|
||||
assert.Equal(t, "Jan 01 2000", result)
|
||||
})
|
||||
|
||||
t.Run("future time shows year format", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Times more than 24h in the future should show year format
|
||||
futureTime := time.Now().AddDate(1, 0, 0)
|
||||
result := formatModTime(futureTime)
|
||||
// Should contain the future year
|
||||
assert.Contains(t, result, " ") // two spaces before year
|
||||
assert.Regexp(t, `^[A-Z][a-z]{2} \d{2} \d{4}$`, result) // matches "Mon DD YYYY"
|
||||
assert.Contains(t, result, futureTime.Format("2006")) // contains the year
|
||||
})
|
||||
|
||||
t.Run("format lengths are consistent", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Both formats should produce 12-character strings for alignment
|
||||
oldTime := time.Date(2020, time.March, 15, 10, 30, 0, 0, time.UTC)
|
||||
oldResult := formatModTime(oldTime)
|
||||
assert.Len(t, oldResult, 12, "old time format should be 12 chars")
|
||||
|
||||
// Recent time: use 1 month ago to ensure it's always within the 6-month window
|
||||
recentTime := time.Now().AddDate(0, -1, 0)
|
||||
recentResult := formatModTime(recentTime)
|
||||
assert.Len(t, recentResult, 12, "recent time format should be 12 chars")
|
||||
})
|
||||
}
|
||||
@ -3,15 +3,18 @@ package name
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/boxo/ipns"
|
||||
ipns_pb "github.com/ipfs/boxo/ipns/pb"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
"github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
"github.com/ipfs/kubo/core/coreiface/options"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -42,29 +45,30 @@ Examples:

Publish an <ipfs-path> with your default name:

-  > ipfs name publish /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
-  Published to QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
+  > ipfs name publish /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
+  Published to k51qzi5uqu5dgklc20hksmmzhoy5lfrn5xcnryq6xp4r50b5yc0vnivpywfu9p: /ipfs/bafk...

Publish an <ipfs-path> with another name, added by an 'ipfs key' command:

-  > ipfs key gen --type=rsa --size=2048 mykey
-  > ipfs name publish --key=mykey /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
-  Published to QmSrPmbaUKA3ZodhzPWZnpFgcPMFWF4QsxXbkWfEptTBJd: /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
+  > ipfs key gen --type=ed25519 mykey
+  k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
+  > ipfs name publish --key=mykey /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4
+  Published to k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr: /ipfs/bafk...

Resolve the value of your name:

  > ipfs name resolve
-  /ipfs/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy
+  /ipfs/bafk...

Resolve the value of another name:

-  > ipfs name resolve QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
-  /ipfs/QmSiTko9JZyabH56y2fussEt1A5oDqsFXB3CkvAqraFryz
+  > ipfs name resolve k51qzi5uqu5dlz49qkb657myg6f1buu6rauv8c6b489a9i1e4dkt7a3yo9j2wr
+  /ipfs/bafk...

Resolve the value of a dnslink:

-  > ipfs name resolve ipfs.io
-  /ipfs/QmaBvfZooxWkrv7D3r8LS9moNjzD2o525XMZze69hhoxf5
+  > ipfs name resolve specs.ipfs.tech
+  /ipfs/bafy...

`,
},
@@ -74,6 +78,8 @@ Resolve the value of a dnslink:
    "resolve": IpnsCmd,
    "pubsub":  IpnsPubsubCmd,
    "inspect": IpnsInspectCmd,
+   "get":     IpnsGetCmd,
+   "put":     IpnsPutCmd,
},
}

@ -123,6 +129,9 @@ in Multibase. The Data field is DAG-CBOR represented as DAG-JSON.
|
||||
Passing --verify will verify signature against provided public key.
|
||||
|
||||
`,
|
||||
HTTP: &cmds.HTTPHelpText{
|
||||
Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
|
||||
},
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.FileArg("record", true, false, "The IPNS record payload to be verified.").EnableStdin(),
|
||||
@ -225,7 +234,7 @@ Passing --verify will verify signature against provided public key.
|
||||
}
|
||||
|
||||
if out.Entry.ValidityType != nil {
|
||||
fmt.Fprintf(tw, "Validity Type:\t%q\n", *out.Entry.ValidityType)
|
||||
fmt.Fprintf(tw, "Validity Type:\t%d\n", *out.Entry.ValidityType)
|
||||
}
|
||||
|
||||
if out.Entry.Validity != nil {
|
||||
@ -267,3 +276,292 @@ Passing --verify will verify signature against provided public key.
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
var IpnsGetCmd = &cmds.Command{
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Retrieve a signed IPNS record.",
|
||||
ShortDescription: `
|
||||
Retrieves the signed IPNS record for a given name from the routing system.
|
||||
|
||||
The output is the raw IPNS record (protobuf) as defined in the IPNS spec:
|
||||
https://specs.ipfs.tech/ipns/ipns-record/
|
||||
|
||||
The record can be inspected with 'ipfs name inspect':
|
||||
|
||||
ipfs name get <name> | ipfs name inspect
|
||||
|
||||
This is equivalent to 'ipfs routing get /ipns/<name>' but only accepts
|
||||
IPNS names (not arbitrary routing keys).
|
||||
|
||||
Note: The routing system returns the "best" IPNS record it knows about.
|
||||
For IPNS, "best" means the record with the highest sequence number.
|
||||
If multiple records exist (e.g., after using 'ipfs name put'), this command
|
||||
returns the one the routing system considers most current.
|
||||
`,
|
||||
HTTP: &cmds.HTTPHelpText{
|
||||
ResponseContentType: "application/vnd.ipfs.ipns-record",
|
||||
},
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("name", true, false, "The IPNS name to look up."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Normalize the argument: accept both "k51..." and "/ipns/k51..."
|
||||
name := req.Arguments[0]
|
||||
if !strings.HasPrefix(name, "/ipns/") {
|
||||
name = "/ipns/" + name
|
||||
}
|
||||
|
||||
data, err := api.Routing().Get(req.Context, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res.SetEncodingType(cmds.OctetStream)
|
||||
res.SetContentType("application/vnd.ipfs.ipns-record")
|
||||
return res.Emit(bytes.NewReader(data))
|
||||
},
|
||||
}
|
||||
|
||||
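As a companion to the help text above, here is a hedged sketch of what a client can do with the bytes returned by 'ipfs name get' (or api.Routing().Get): decode them with boxo's ipns package and read the value and sequence number. The record is assumed to have been saved to record.bin; only the decoding is the point here.

package main

import (
    "fmt"
    "os"

    "github.com/ipfs/boxo/ipns"
)

func main() {
    // e.g. produced by: ipfs name get <name> > record.bin
    data, err := os.ReadFile("record.bin")
    if err != nil {
        panic(err)
    }

    rec, err := ipns.UnmarshalRecord(data)
    if err != nil {
        panic(fmt.Errorf("invalid IPNS record: %w", err))
    }

    value, err := rec.Value()
    if err != nil {
        panic(err)
    }
    seq, err := rec.Sequence()
    if err != nil {
        panic(err)
    }
    fmt.Println("value:   ", value) // e.g. /ipfs/bafk...
    fmt.Println("sequence:", seq)
}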
const (
|
||||
forceOptionName = "force"
|
||||
putAllowOfflineOption = "allow-offline"
|
||||
allowDelegatedOption = "allow-delegated"
|
||||
putQuietOptionName = "quiet"
|
||||
maxIPNSRecordSize = 10 << 10 // 10 KiB per IPNS spec
|
||||
)
|
||||
|
||||
var errPutAllowOffline = errors.New("can't put while offline: pass `--allow-offline` to store locally or `--allow-delegated` if Ipns.DelegatedPublishers are set up")
|
||||
|
||||
var IpnsPutCmd = &cmds.Command{
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Store a pre-signed IPNS record in the routing system.",
|
||||
ShortDescription: `
|
||||
Stores a pre-signed IPNS record in the routing system.
|
||||
|
||||
This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
|
||||
https://specs.ipfs.tech/ipns/ipns-record/
|
||||
|
||||
The record must be signed by the private key corresponding to the IPNS name.
|
||||
Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine.
|
||||
`,
|
||||
LongDescription: `
|
||||
Stores a pre-signed IPNS record in the routing system.
|
||||
|
||||
This command accepts a raw IPNS record (protobuf) as defined in the IPNS spec:
|
||||
https://specs.ipfs.tech/ipns/ipns-record/
|
||||
|
||||
The record must be signed by the private key corresponding to the IPNS name.
|
||||
Use 'ipfs name get' to retrieve records and 'ipfs name inspect' to examine.
|
||||
|
||||
Use Cases:
|
||||
|
||||
- Re-publishing third-party records: store someone else's signed record
|
||||
- Cross-node sync: import records exported from another node
|
||||
- Backup/restore: export with 'name get', restore with 'name put'
|
||||
|
||||
Validation:
|
||||
|
||||
By default, the command validates that:
|
||||
|
||||
- The record is a valid IPNS record (protobuf)
|
||||
- The record size is within 10 KiB limit
|
||||
- The signature matches the provided IPNS name
|
||||
- The record's sequence number is higher than any existing record
|
||||
(identical records are allowed for republishing)
|
||||
|
||||
The --force flag skips this command's validation and passes the record
|
||||
directly to the routing system. Note that --force only affects this command;
|
||||
it does not control how the routing system handles the record. The routing
|
||||
system may still reject invalid records or prefer records with higher sequence
|
||||
numbers. Use --force primarily for testing (e.g., to observe how the routing
|
||||
system reacts to incorrectly signed or malformed records).
|
||||
|
||||
Important: Even after a successful 'name put', a subsequent 'name get' may
|
||||
return a different record if one with a higher sequence number exists.
|
||||
This is expected IPNS behavior, not a bug.
|
||||
|
||||
Publishing Modes:
|
||||
|
||||
By default, IPNS records are published to both the DHT and any configured
|
||||
HTTP delegated publishers. You can control this behavior with:
|
||||
|
||||
--allow-offline Store locally without requiring network connectivity
|
||||
--allow-delegated Publish via HTTP delegated publishers only (no DHT)
|
||||
|
||||
Examples:
|
||||
|
||||
Export and re-import a record:
|
||||
|
||||
> ipfs name get k51... > record.bin
|
||||
> ipfs name put k51... record.bin
|
||||
|
||||
Store a record received from someone else:
|
||||
|
||||
> ipfs name put k51... third-party-record.bin
|
||||
|
||||
Force store a record to test routing validation:
|
||||
|
||||
> ipfs name put --force k51... possibly-invalid-record.bin
|
||||
`,
|
||||
HTTP: &cmds.HTTPHelpText{
|
||||
Description: "Request body should be `multipart/form-data` with the IPNS record bytes.",
|
||||
},
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("name", true, false, "The IPNS name to store the record for (e.g., k51... or /ipns/k51...)."),
|
||||
cmds.FileArg("record", true, false, "Path to file containing the signed IPNS record.").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(forceOptionName, "f", "Skip validation (signature, sequence, size)."),
|
||||
cmds.BoolOption(putAllowOfflineOption, "Store locally without broadcasting to the network."),
|
||||
cmds.BoolOption(allowDelegatedOption, "Publish via HTTP delegated publishers only (no DHT)."),
|
||||
cmds.BoolOption(putQuietOptionName, "q", "Write no output."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
nd, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse options
|
||||
force, _ := req.Options[forceOptionName].(bool)
|
||||
allowOffline, _ := req.Options[putAllowOfflineOption].(bool)
|
||||
allowDelegated, _ := req.Options[allowDelegatedOption].(bool)
|
||||
|
||||
// Validate flag combinations
|
||||
if allowOffline && allowDelegated {
|
||||
return errors.New("cannot use both --allow-offline and --allow-delegated flags")
|
||||
}
|
||||
|
||||
// Handle different publishing modes
|
||||
if allowDelegated {
|
||||
// AllowDelegated mode: check if delegated publishers are configured
|
||||
cfg, err := nd.Repo.Config()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read config: %w", err)
|
||||
}
|
||||
delegatedPublishers := cfg.DelegatedPublishersWithAutoConf()
|
||||
if len(delegatedPublishers) == 0 {
|
||||
return errors.New("no delegated publishers configured: add Ipns.DelegatedPublishers or use --allow-offline for local-only publishing")
|
||||
}
|
||||
// For allow-delegated mode, we proceed even if offline
|
||||
// since we're using HTTP publishing via delegated publishers
|
||||
}
|
||||
|
||||
// Parse the IPNS name argument
|
||||
nameArg := req.Arguments[0]
|
||||
if !strings.HasPrefix(nameArg, "/ipns/") {
|
||||
nameArg = "/ipns/" + nameArg
|
||||
}
|
||||
// Extract the name part after /ipns/
|
||||
namePart := strings.TrimPrefix(nameArg, "/ipns/")
|
||||
name, err := ipns.NameFromString(namePart)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid IPNS name: %w", err)
|
||||
}
|
||||
|
||||
// Read raw record bytes from file/stdin
|
||||
file, err := cmdenv.GetFileArg(req.Files.Entries())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Read record data (limit to 1 MiB for memory safety)
|
||||
data, err := io.ReadAll(io.LimitReader(file, 1<<20))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read record: %w", err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
return errors.New("record is empty")
|
||||
}
|
||||
|
||||
// Validate unless --force
|
||||
if !force {
|
||||
// Check size limit per IPNS spec
|
||||
if len(data) > maxIPNSRecordSize {
|
||||
return fmt.Errorf("record exceeds maximum size of %d bytes, use --force to skip size check", maxIPNSRecordSize)
|
||||
}
|
||||
rec, err := ipns.UnmarshalRecord(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid IPNS record: %w", err)
|
||||
}
|
||||
|
||||
// Validate signature against provided name
|
||||
err = ipns.ValidateWithName(rec, name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("record validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Check for sequence conflicts with existing record
|
||||
existingData, err := api.Routing().Get(req.Context, nameArg)
|
||||
if err == nil {
|
||||
// Allow republishing the exact same record (common use case:
|
||||
// get a third-party record and put it back to refresh DHT)
|
||||
if !bytes.Equal(existingData, data) {
|
||||
existingRec, parseErr := ipns.UnmarshalRecord(existingData)
|
||||
if parseErr == nil {
|
||||
existingSeq, seqErr := existingRec.Sequence()
|
||||
newSeq, newSeqErr := rec.Sequence()
|
||||
if seqErr == nil && newSeqErr == nil && existingSeq >= newSeq {
|
||||
return fmt.Errorf("existing IPNS record has sequence %d >= new record sequence %d, use 'ipfs name put --force' to skip this check", existingSeq, newSeq)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// If Get fails (no existing record), that's fine - proceed with put
|
||||
}
|
||||
|
||||
// Publish the original bytes as-is
|
||||
// When allowDelegated is true, we set allowOffline to allow the operation
|
||||
// even without DHT connectivity (delegated publishers use HTTP)
|
||||
opts := []options.RoutingPutOption{
|
||||
options.Routing.AllowOffline(allowOffline || allowDelegated),
|
||||
}
|
||||
|
||||
err = api.Routing().Put(req.Context, nameArg, data, opts...)
|
||||
if err != nil {
|
||||
if err.Error() == "can't put while offline" {
|
||||
return errPutAllowOffline
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract value from the record for the response
|
||||
value := ""
|
||||
if rec, err := ipns.UnmarshalRecord(data); err == nil {
|
||||
if v, err := rec.Value(); err == nil {
|
||||
value = v.String()
|
||||
}
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &IpnsEntry{
|
||||
Name: name.String(),
|
||||
Value: value,
|
||||
})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, ie *IpnsEntry) error {
|
||||
quiet, _ := req.Options[putQuietOptionName].(bool)
|
||||
if quiet {
|
||||
return nil
|
||||
}
|
||||
_, err := fmt.Fprintln(w, cmdenv.EscNonPrint(ie.Name))
|
||||
return err
|
||||
}),
|
||||
},
|
||||
Type: IpnsEntry{},
|
||||
}
|
||||
|
||||
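The validation path in the Run function above boils down to a few boxo/ipns calls. Below is a condensed, hedged sketch of the same checks in isolation (size cap, decode, signature against the name, sequence comparison); validatePut, newRecord, and existingRecord are placeholder names for illustration, not Kubo identifiers.

package example

import (
    "bytes"
    "fmt"

    "github.com/ipfs/boxo/ipns"
)

// validatePut mirrors the checks 'ipfs name put' performs before publishing:
// size limit, well-formed record, signature matching the name, and a sequence
// number not lower than what is already known (identical bytes are allowed).
func validatePut(name ipns.Name, newRecord, existingRecord []byte) error {
    const maxIPNSRecordSize = 10 << 10 // 10 KiB per the IPNS spec

    if len(newRecord) > maxIPNSRecordSize {
        return fmt.Errorf("record exceeds %d bytes", maxIPNSRecordSize)
    }
    rec, err := ipns.UnmarshalRecord(newRecord)
    if err != nil {
        return fmt.Errorf("invalid IPNS record: %w", err)
    }
    if err := ipns.ValidateWithName(rec, name); err != nil {
        return fmt.Errorf("record validation failed: %w", err)
    }
    if len(existingRecord) == 0 || bytes.Equal(existingRecord, newRecord) {
        return nil // nothing to compare against, or an exact republish
    }
    existing, err := ipns.UnmarshalRecord(existingRecord)
    if err != nil {
        return nil // unreadable existing record: let the new one through
    }
    oldSeq, err1 := existing.Sequence()
    newSeq, err2 := rec.Sequence()
    if err1 == nil && err2 == nil && oldSeq >= newSeq {
        return fmt.Errorf("existing record has sequence %d >= %d", oldSeq, newSeq)
    }
    return nil
}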
@ -50,9 +50,17 @@ type P2PStreamsOutput struct {
|
||||
Streams []P2PStreamInfoOutput
|
||||
}
|
||||
|
||||
// P2PForegroundOutput is output type for foreground mode status messages
|
||||
type P2PForegroundOutput struct {
|
||||
Status string // "active" or "closing"
|
||||
Protocol string
|
||||
Address string
|
||||
}
|
||||
|
||||
const (
|
||||
allowCustomProtocolOptionName = "allow-custom-protocol"
|
||||
reportPeerIDOptionName = "report-peer-id"
|
||||
foregroundOptionName = "foreground"
|
||||
)
|
||||
|
||||
var resolveTimeout = 10 * time.Second
|
||||
@@ -83,15 +91,37 @@ var p2pForwardCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Forward connections to libp2p service.",
        ShortDescription: `
-Forward connections made to <listen-address> to <target-address>.
+Forward connections made to <listen-address> to <target-address> via libp2p.

-<protocol> specifies the libp2p protocol name to use for libp2p
-connections and/or handlers. It must be prefixed with '` + P2PProtoPrefix + `'.
+Creates a local TCP listener that tunnels connections through libp2p to a
+remote peer's p2p listener. Similar to SSH port forwarding (-L flag).

-Example:
-  ipfs p2p forward ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/4567 /p2p/QmPeer
-    - Forward connections to 127.0.0.1:4567 to '` + P2PProtoPrefix + `myproto' service on /p2p/QmPeer
+ARGUMENTS:
+
+  <protocol>        Protocol name (must start with '` + P2PProtoPrefix + `')
+  <listen-address>  Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000)
+  <target-address>  Remote peer multiaddr (e.g., /p2p/PeerID)
+
+FOREGROUND MODE (--foreground, -f):
+
+By default, the forwarder runs in the daemon and the command returns
+immediately. Use --foreground to block until interrupted:
+
+  - Ctrl+C or SIGTERM: Removes the forwarder and exits
+  - 'ipfs p2p close': Removes the forwarder and exits
+  - Daemon shutdown: Forwarder is automatically removed
+
+Useful for systemd services or scripts that need cleanup on exit.
+
+EXAMPLES:
+
+  # Persistent forwarder (command returns immediately)
+  ipfs p2p forward /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID
+
+  # Temporary forwarder (removed when command exits)
+  ipfs p2p forward -f /x/myapp /ip4/127.0.0.1/tcp/3000 /p2p/PeerID
+
+Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
`,
    },
Arguments: []cmds.Argument{
|
||||
@ -101,6 +131,7 @@ Example:
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"),
|
||||
cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; forwarder is removed when command exits"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
n, err := p2pGetNode(env)
|
||||
@ -130,7 +161,51 @@ Example:
|
||||
return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace")
|
||||
}
|
||||
|
||||
return forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets)
|
||||
listener, err := forwardLocal(n.Context(), n.P2P, n.Peerstore, proto, listen, targets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
foreground, _ := req.Options[foregroundOptionName].(bool)
|
||||
if foreground {
|
||||
if err := res.Emit(&P2PForegroundOutput{
|
||||
Status: "active",
|
||||
Protocol: protoOpt,
|
||||
Address: listenOpt,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait for either context cancellation (Ctrl+C/daemon shutdown)
|
||||
// or listener removal (ipfs p2p close)
|
||||
select {
|
||||
case <-req.Context.Done():
|
||||
// SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing)
|
||||
n.P2P.ListenersLocal.Close(func(l p2p.Listener) bool {
|
||||
return l == listener
|
||||
})
|
||||
return nil
|
||||
case <-listener.Done():
|
||||
// Closed via "ipfs p2p close" - emit closing message
|
||||
return res.Emit(&P2PForegroundOutput{
|
||||
Status: "closing",
|
||||
Protocol: protoOpt,
|
||||
Address: listenOpt,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Type: P2PForegroundOutput{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error {
|
||||
if out.Status == "active" {
|
||||
fmt.Fprintf(w, "Forwarding %s to %s, waiting for interrupt...\n", out.Protocol, out.Address)
|
||||
} else if out.Status == "closing" {
|
||||
fmt.Fprintf(w, "Received interrupt, removing forwarder for %s\n", out.Protocol)
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
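Both the forward handler above and the listen handler below block on the same two channels when --foreground is set. A minimal sketch of that shared wait, assuming only that p2p.Listener exposes the Done() channel used above (the helper name waitForeground and the removeSelf callback are illustrative, not part of this change):

// waitForeground blocks until the CLI request is interrupted (Ctrl+C, SIGTERM,
// daemon shutdown) or the listener is removed via "ipfs p2p close". removeSelf
// should remove exactly this listener from its registry, e.g.
// n.P2P.ListenersLocal.Close(func(l p2p.Listener) bool { return l == listener }).
func waitForeground(ctx context.Context, listener p2p.Listener, removeSelf func()) {
	select {
	case <-ctx.Done():
		// Interrupt: clean up quietly; the CLI stream is already closing.
		removeSelf()
	case <-listener.Done():
		// Already removed by "ipfs p2p close"; the handlers above additionally
		// emit a "closing" status message at this point.
	}
}
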
@ -185,14 +260,40 @@ var p2pListenCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Create libp2p service.",
|
||||
ShortDescription: `
|
||||
Create libp2p service and forward connections made to <target-address>.
|
||||
Create a libp2p protocol handler that forwards incoming connections to
|
||||
<target-address>.
|
||||
|
||||
<protocol> specifies the libp2p handler name. It must be prefixed with '` + P2PProtoPrefix + `'.
|
||||
When a remote peer connects using 'ipfs p2p forward', the connection is
|
||||
forwarded to your local service. Similar to SSH port forwarding (server side).
|
||||
|
||||
Example:
|
||||
ipfs p2p listen ` + P2PProtoPrefix + `myproto /ip4/127.0.0.1/tcp/1234
|
||||
- Forward connections to 'myproto' libp2p service to 127.0.0.1:1234
|
||||
ARGUMENTS:
|
||||
|
||||
<protocol> Protocol name (must start with '` + P2PProtoPrefix + `')
|
||||
<target-address> Local multiaddr (e.g., /ip4/127.0.0.1/tcp/3000)
|
||||
|
||||
FOREGROUND MODE (--foreground, -f):
|
||||
|
||||
By default, the listener runs in the daemon and the command returns
|
||||
immediately. Use --foreground to block until interrupted:
|
||||
|
||||
- Ctrl+C or SIGTERM: Removes the listener and exits
|
||||
- 'ipfs p2p close': Removes the listener and exits
|
||||
- Daemon shutdown: Listener is automatically removed
|
||||
|
||||
Useful for systemd services or scripts that need cleanup on exit.
|
||||
|
||||
EXAMPLES:
|
||||
|
||||
# Persistent listener (command returns immediately)
|
||||
ipfs p2p listen /x/myapp /ip4/127.0.0.1/tcp/3000
|
||||
|
||||
# Temporary listener (removed when command exits)
|
||||
ipfs p2p listen -f /x/myapp /ip4/127.0.0.1/tcp/3000
|
||||
|
||||
# Report connecting peer ID to the target application
|
||||
ipfs p2p listen -r /x/myapp /ip4/127.0.0.1/tcp/3000
|
||||
|
||||
Learn more: https://github.com/ipfs/kubo/blob/master/docs/p2p-tunnels.md
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
@ -202,6 +303,7 @@ Example:
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(allowCustomProtocolOptionName, "Don't require /x/ prefix"),
|
||||
cmds.BoolOption(reportPeerIDOptionName, "r", "Send remote base58 peerid to target when a new connection is established"),
|
||||
cmds.BoolOption(foregroundOptionName, "f", "Run in foreground; listener is removed when command exits"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
n, err := p2pGetNode(env)
|
||||
@ -231,8 +333,51 @@ Example:
|
||||
return errors.New("protocol name must be within '" + P2PProtoPrefix + "' namespace")
|
||||
}
|
||||
|
||||
_, err = n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID)
|
||||
return err
|
||||
listener, err := n.P2P.ForwardRemote(n.Context(), proto, target, reportPeerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
foreground, _ := req.Options[foregroundOptionName].(bool)
|
||||
if foreground {
|
||||
if err := res.Emit(&P2PForegroundOutput{
|
||||
Status: "active",
|
||||
Protocol: protoOpt,
|
||||
Address: targetOpt,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait for either context cancellation (Ctrl+C/daemon shutdown)
|
||||
// or listener removal (ipfs p2p close)
|
||||
select {
|
||||
case <-req.Context.Done():
|
||||
// SIGTERM/Ctrl+C - cleanup silently (CLI stream already closing)
|
||||
n.P2P.ListenersP2P.Close(func(l p2p.Listener) bool {
|
||||
return l == listener
|
||||
})
|
||||
return nil
|
||||
case <-listener.Done():
|
||||
// Closed via "ipfs p2p close" - emit closing message
|
||||
return res.Emit(&P2PForegroundOutput{
|
||||
Status: "closing",
|
||||
Protocol: protoOpt,
|
||||
Address: targetOpt,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Type: P2PForegroundOutput{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *P2PForegroundOutput) error {
|
||||
if out.Status == "active" {
|
||||
fmt.Fprintf(w, "Listening on %s, forwarding to %s, waiting for interrupt...\n", out.Protocol, out.Address)
|
||||
} else if out.Status == "closing" {
|
||||
fmt.Fprintf(w, "Received interrupt, removing listener for %s\n", out.Protocol)
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
@ -271,11 +416,9 @@ func checkPort(target ma.Multiaddr) error {
|
||||
}
|
||||
|
||||
// forwardLocal forwards local connections to a libp2p service
|
||||
func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) error {
|
||||
func forwardLocal(ctx context.Context, p *p2p.P2P, ps pstore.Peerstore, proto protocol.ID, bindAddr ma.Multiaddr, addr *peer.AddrInfo) (p2p.Listener, error) {
|
||||
ps.AddAddrs(addr.ID, addr.Addrs, pstore.TempAddrTTL)
|
||||
// TODO: return some info
|
||||
_, err := p.ForwardLocal(ctx, addr.ID, proto, bindAddr)
|
||||
return err
|
||||
return p.ForwardLocal(ctx, addr.ID, proto, bindAddr)
|
||||
}
|
||||
|
||||
const (
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
bserv "github.com/ipfs/boxo/blockservice"
|
||||
offline "github.com/ipfs/boxo/exchange/offline"
|
||||
dag "github.com/ipfs/boxo/ipld/merkledag"
|
||||
@ -47,6 +48,7 @@ type PinOutput struct {
|
||||
type AddPinOutput struct {
|
||||
Pins []string `json:",omitempty"`
|
||||
Progress int `json:",omitempty"`
|
||||
Bytes uint64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
@ -147,14 +149,15 @@ It may take some time. Pass '--progress' to track the progress.
|
||||
return val.err
|
||||
}
|
||||
|
||||
if pv := v.Value(); pv != 0 {
|
||||
if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil {
|
||||
if ps := v.ProgressStat(); ps.Nodes != 0 {
|
||||
if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return res.Emit(&AddPinOutput{Pins: val.pins})
|
||||
case <-ticker.C:
|
||||
if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil {
|
||||
ps := v.ProgressStat()
|
||||
if err := res.Emit(&AddPinOutput{Progress: ps.Nodes, Bytes: ps.Bytes}); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
@ -197,7 +200,7 @@ It may take some time. Pass '--progress' to track the progress.
|
||||
}
|
||||
if out.Pins == nil {
|
||||
// this can only happen if the progress option is set
|
||||
fmt.Fprintf(os.Stderr, "Fetched/Processed %d nodes\r", out.Progress)
|
||||
fmt.Fprintf(os.Stderr, "Fetched/Processed %d nodes (%s)\r", out.Progress, humanize.Bytes(out.Bytes))
|
||||
} else {
|
||||
err = re.Emit(out)
|
||||
if err != nil {
|
||||
|
||||
@ -112,7 +112,7 @@ trip latency information.
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for i := 0; i < numPings; i++ {
|
||||
for range numPings {
|
||||
r, ok := <-pings
|
||||
if !ok {
|
||||
break
|
||||
|
||||
@ -70,6 +70,9 @@ However, it could reveal:
|
||||
- Memory offsets of various data structures.
|
||||
- Any modifications you've made to go-ipfs.
|
||||
`,
|
||||
HTTP: &cmds.HTTPHelpText{
|
||||
ResponseContentType: "application/zip",
|
||||
},
|
||||
},
|
||||
NoLocal: true,
|
||||
Options: []cmds.Option{
|
||||
@ -121,6 +124,8 @@ However, it could reveal:
|
||||
archive.Close()
|
||||
_ = w.CloseWithError(err)
|
||||
}()
|
||||
res.SetEncodingType(cmds.OctetStream)
|
||||
res.SetContentType("application/zip")
|
||||
return res.Emit(r)
|
||||
},
|
||||
PostRun: cmds.PostRunMap{
|
||||
|
||||
@ -1,36 +1,69 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/ipfs/boxo/provider"
|
||||
boxoprovider "github.com/ipfs/boxo/provider"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
"github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/fullrt"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/provider"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/provider/buffered"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/provider/dual"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/provider/stats"
|
||||
routing "github.com/libp2p/go-libp2p/core/routing"
|
||||
"github.com/probe-lab/go-libdht/kad/key"
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
const (
|
||||
provideQuietOptionName = "quiet"
|
||||
provideLanOptionName = "lan"
|
||||
|
||||
provideStatAllOptionName = "all"
|
||||
provideStatCompactOptionName = "compact"
|
||||
provideStatNetworkOptionName = "network"
|
||||
provideStatConnectivityOptionName = "connectivity"
|
||||
provideStatOperationsOptionName = "operations"
|
||||
provideStatTimingsOptionName = "timings"
|
||||
provideStatScheduleOptionName = "schedule"
|
||||
provideStatQueuesOptionName = "queues"
|
||||
provideStatWorkersOptionName = "workers"
|
||||
|
||||
// lowWorkerThreshold is the threshold below which worker availability warnings are shown
|
||||
lowWorkerThreshold = 2
|
||||
)
|
||||
|
||||
var ProvideCmd = &cmds.Command{
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Control providing operations",
|
||||
Tagline: "Control and monitor content providing",
|
||||
ShortDescription: `
|
||||
Control providing operations.
|
||||
|
||||
NOTE: This command is experimental and not all provide-related commands have
|
||||
been migrated to this namespace yet. For example, 'ipfs routing
|
||||
provide|reprovide' are still under the routing namespace, 'ipfs stats
|
||||
reprovide' provides statistics. Additionally, 'ipfs bitswap reprovide' and
|
||||
'ipfs stats provide' are deprecated.
|
||||
OVERVIEW:
|
||||
|
||||
The provider system advertises content by publishing provider records,
|
||||
allowing other nodes to discover which peers have specific content.
|
||||
Content is reprovided periodically (every Provide.DHT.Interval)
|
||||
according to Provide.Strategy.
|
||||
|
||||
CONFIGURATION:
|
||||
|
||||
Learn more: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
|
||||
|
||||
SEE ALSO:
|
||||
|
||||
For ad-hoc one-time provide, see 'ipfs routing provide'
|
||||
`,
|
||||
},
|
||||
|
||||
@ -47,10 +80,18 @@ var provideClearCmd = &cmds.Command{
|
||||
ShortDescription: `
|
||||
Clear all CIDs pending to be provided for the first time.
|
||||
|
||||
Note: Kubo will automatically clear the queue when it detects a change of
|
||||
Provide.Strategy upon a restart. For more information about provide
|
||||
strategies, see:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
|
||||
BEHAVIOR:
|
||||
|
||||
This command removes CIDs from the provide queue that are waiting to be
|
||||
advertised to the DHT for the first time. It does not affect content that
|
||||
is already being reprovided on schedule.
|
||||
|
||||
AUTOMATIC CLEARING:
|
||||
|
||||
Kubo will automatically clear the queue when it detects a change of
|
||||
Provide.Strategy upon a restart.
|
||||
|
||||
Learn: https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
|
||||
`,
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
@ -90,25 +131,108 @@ https://github.com/ipfs/kubo/blob/master/docs/config.md#providestrategy
|
||||
}
|
||||
|
||||
type provideStats struct {
|
||||
provider.ReproviderStats
|
||||
fullRT bool
|
||||
Sweep *stats.Stats
|
||||
Legacy *boxoprovider.ReproviderStats
|
||||
FullRT bool // only used for legacy stats
|
||||
}
|
||||
|
||||
// extractSweepingProvider extracts a SweepingProvider from the given provider interface.
|
||||
// It handles unwrapping buffered and dual providers, selecting LAN or WAN as specified.
|
||||
// Returns nil if the provider is not a sweeping provider type.
|
||||
func extractSweepingProvider(prov any, useLAN bool) *provider.SweepingProvider {
|
||||
switch p := prov.(type) {
|
||||
case *provider.SweepingProvider:
|
||||
return p
|
||||
case *dual.SweepingProvider:
|
||||
if useLAN {
|
||||
return p.LAN
|
||||
}
|
||||
return p.WAN
|
||||
case *buffered.SweepingProvider:
|
||||
// Recursively extract from the inner provider
|
||||
return extractSweepingProvider(p.Provider, useLAN)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var provideStatCmd = &cmds.Command{
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Returns statistics about the node's provider system.",
|
||||
Tagline: "Show statistics about the provider system",
|
||||
ShortDescription: `
|
||||
Returns statistics about the content the node is reproviding every
|
||||
Provide.DHT.Interval according to Provide.Strategy:
|
||||
https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
|
||||
Returns statistics about the node's provider system.
|
||||
|
||||
This interface is not stable and may change from release to release.
|
||||
OVERVIEW:
|
||||
|
||||
The provide system advertises content to the DHT by publishing provider
|
||||
records that map CIDs to your peer ID. These records expire after a fixed
|
||||
TTL to account for node churn, so content must be reprovided periodically
|
||||
to stay discoverable.
|
||||
|
||||
Two provider types exist:
|
||||
|
||||
- Sweep provider: Divides the DHT keyspace into regions and systematically
|
||||
sweeps through them over the reprovide interval. Batches CIDs allocated
|
||||
to the same DHT servers, reducing lookups from N (one per CID) to a
|
||||
small static number based on DHT size (~3k for 10k DHT servers). Spreads
|
||||
work evenly over time to prevent resource spikes and ensure announcements
|
||||
happen just before records expire.
|
||||
|
||||
- Legacy provider: Processes each CID individually with separate DHT
|
||||
lookups. Attempts to reprovide all content as quickly as possible at the
|
||||
start of each cycle. Works well for small datasets but struggles with
|
||||
large collections.
|
||||
|
||||
Learn more:
|
||||
- Config: https://github.com/ipfs/kubo/blob/master/docs/config.md#provide
|
||||
- Metrics: https://github.com/ipfs/kubo/blob/master/docs/provide-stats.md
|
||||
|
||||
DEFAULT OUTPUT:
|
||||
|
||||
Shows a brief summary including queue sizes, scheduled items, average record
|
||||
holders, ongoing/total provides, and worker warnings.
|
||||
|
||||
DETAILED OUTPUT:
|
||||
|
||||
Use --all for detailed statistics with these sections: connectivity, queues,
|
||||
schedule, timings, network, operations, and workers. Individual sections can
|
||||
be displayed with their flags (e.g., --network, --operations). Multiple flags
|
||||
can be combined.
|
||||
|
||||
Use --compact for monitoring-friendly 2-column output (requires --all).
|
||||
|
||||
EXAMPLES:
|
||||
|
||||
Monitor provider statistics in real-time with 2-column layout:
|
||||
|
||||
watch ipfs provide stat --all --compact
|
||||
|
||||
Get statistics in JSON format for programmatic processing:
|
||||
|
||||
ipfs provide stat --enc=json | jq
|
||||
|
||||
NOTES:
|
||||
|
||||
- This interface is experimental and may change between releases
|
||||
- Legacy provider shows basic stats only (no flags supported)
|
||||
- "Regions" are keyspace divisions for spreading reprovide work
|
||||
- For Dual DHT: use --lan for LAN provider stats (default is WAN)
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{},
|
||||
Options: []cmds.Option{},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(provideLanOptionName, "Show stats for LAN DHT only (for Sweep+Dual DHT only)"),
|
||||
cmds.BoolOption(provideStatAllOptionName, "a", "Display all provide sweep stats"),
|
||||
cmds.BoolOption(provideStatCompactOptionName, "Display stats in 2-column layout (requires --all)"),
|
||||
cmds.BoolOption(provideStatConnectivityOptionName, "Display DHT connectivity status"),
|
||||
cmds.BoolOption(provideStatNetworkOptionName, "Display network stats (peers, reachability, region size)"),
|
||||
cmds.BoolOption(provideStatScheduleOptionName, "Display reprovide schedule (CIDs/regions scheduled, next reprovide time)"),
|
||||
cmds.BoolOption(provideStatTimingsOptionName, "Display timing information (uptime, cycle start, reprovide interval)"),
|
||||
cmds.BoolOption(provideStatWorkersOptionName, "Display worker pool stats (active/available/queued workers)"),
|
||||
cmds.BoolOption(provideStatOperationsOptionName, "Display operation stats (ongoing/past provides, rates, errors)"),
|
||||
cmds.BoolOption(provideStatQueuesOptionName, "Display provide and reprovide queue sizes"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
nd, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
@ -119,35 +243,271 @@ This interface is not stable and may change from release to release.
|
||||
return ErrNotOnline
|
||||
}
|
||||
|
||||
provideSys, ok := nd.Provider.(provider.System)
|
||||
if !ok {
|
||||
return errors.New("stats not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
|
||||
lanStats, _ := req.Options[provideLanOptionName].(bool)
|
||||
|
||||
// Handle legacy provider
|
||||
if legacySys, ok := nd.Provider.(boxoprovider.System); ok {
|
||||
if lanStats {
|
||||
return errors.New("LAN stats only available for Sweep provider with Dual DHT")
|
||||
}
|
||||
stats, err := legacySys.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, fullRT := nd.DHTClient.(*fullrt.FullRT)
|
||||
return res.Emit(provideStats{Legacy: &stats, FullRT: fullRT})
|
||||
}
|
||||
|
||||
stats, err := provideSys.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, fullRT := nd.DHTClient.(*fullrt.FullRT)
|
||||
|
||||
if err := res.Emit(provideStats{stats, fullRT}); err != nil {
|
||||
return err
|
||||
// Extract sweeping provider (handles buffered and dual unwrapping)
|
||||
sweepingProvider := extractSweepingProvider(nd.Provider, lanStats)
|
||||
if sweepingProvider == nil {
|
||||
if lanStats {
|
||||
return errors.New("LAN stats only available for Sweep provider with Dual DHT")
|
||||
}
|
||||
return fmt.Errorf("stats not available with current routing system %T", nd.Provider)
|
||||
}
|
||||
|
||||
return nil
|
||||
s := sweepingProvider.Stats()
|
||||
return res.Emit(provideStats{Sweep: &s})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, s provideStats) error {
|
||||
wtr := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)
|
||||
defer wtr.Flush()
|
||||
|
||||
fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.TotalReprovides))
|
||||
fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.AvgReprovideDuration))
|
||||
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.LastReprovideDuration))
|
||||
if !s.LastRun.IsZero() {
|
||||
fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.LastRun))
|
||||
if s.fullRT {
|
||||
fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.LastRun.Add(s.ReprovideInterval)))
|
||||
all, _ := req.Options[provideStatAllOptionName].(bool)
|
||||
compact, _ := req.Options[provideStatCompactOptionName].(bool)
|
||||
connectivity, _ := req.Options[provideStatConnectivityOptionName].(bool)
|
||||
queues, _ := req.Options[provideStatQueuesOptionName].(bool)
|
||||
schedule, _ := req.Options[provideStatScheduleOptionName].(bool)
|
||||
network, _ := req.Options[provideStatNetworkOptionName].(bool)
|
||||
timings, _ := req.Options[provideStatTimingsOptionName].(bool)
|
||||
operations, _ := req.Options[provideStatOperationsOptionName].(bool)
|
||||
workers, _ := req.Options[provideStatWorkersOptionName].(bool)
|
||||
|
||||
flagCount := 0
|
||||
for _, enabled := range []bool{all, connectivity, queues, schedule, network, timings, operations, workers} {
|
||||
if enabled {
|
||||
flagCount++
|
||||
}
|
||||
}
|
||||
|
||||
if s.Legacy != nil {
|
||||
if flagCount > 0 {
|
||||
return errors.New("cannot use flags with legacy provide stats")
|
||||
}
|
||||
fmt.Fprintf(wtr, "TotalReprovides:\t%s\n", humanNumber(s.Legacy.TotalReprovides))
|
||||
fmt.Fprintf(wtr, "AvgReprovideDuration:\t%s\n", humanDuration(s.Legacy.AvgReprovideDuration))
|
||||
fmt.Fprintf(wtr, "LastReprovideDuration:\t%s\n", humanDuration(s.Legacy.LastReprovideDuration))
|
||||
if !s.Legacy.LastRun.IsZero() {
|
||||
fmt.Fprintf(wtr, "LastReprovide:\t%s\n", humanTime(s.Legacy.LastRun))
|
||||
if s.FullRT {
|
||||
fmt.Fprintf(wtr, "NextReprovide:\t%s\n", humanTime(s.Legacy.LastRun.Add(s.Legacy.ReprovideInterval)))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.Sweep == nil {
|
||||
return errors.New("no provide stats available")
|
||||
}
|
||||
|
||||
// Sweep provider stats
|
||||
if s.Sweep.Closed {
|
||||
fmt.Fprintf(wtr, "Provider is closed\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
if compact && !all {
|
||||
return errors.New("--compact requires --all flag")
|
||||
}
|
||||
|
||||
brief := flagCount == 0
|
||||
showHeadings := flagCount > 1 || all
|
||||
|
||||
compactMode := all && compact
|
||||
var cols [2][]string
|
||||
col0MaxWidth := 0
|
||||
// formatLine handles both normal and compact output modes:
|
||||
// - Normal mode: all lines go to cols[0], col parameter is ignored
|
||||
// - Compact mode: col 0 for left column, col 1 for right column
|
||||
formatLine := func(col int, format string, a ...any) {
|
||||
if compactMode {
|
||||
s := fmt.Sprintf(format, a...)
|
||||
cols[col] = append(cols[col], s)
|
||||
if col == 0 {
|
||||
col0MaxWidth = max(col0MaxWidth, utf8.RuneCountInString(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
format = strings.Replace(format, ": ", ":\t", 1)
|
||||
format = strings.Replace(format, ", ", ",\t", 1)
|
||||
cols[0] = append(cols[0], fmt.Sprintf(format, a...))
|
||||
}
|
||||
addBlankLine := func(col int) {
|
||||
if !brief {
|
||||
formatLine(col, "")
|
||||
}
|
||||
}
|
||||
sectionTitle := func(col int, title string) {
|
||||
if !brief && showHeadings {
|
||||
formatLine(col, "%s:", title)
|
||||
}
|
||||
}
|
||||
|
||||
indent := " "
|
||||
if brief || !showHeadings {
|
||||
indent = ""
|
||||
}
|
||||
|
||||
// Connectivity
|
||||
if all || connectivity || brief && s.Sweep.Connectivity.Status != "online" {
|
||||
sectionTitle(1, "Connectivity")
|
||||
since := s.Sweep.Connectivity.Since
|
||||
if since.IsZero() {
|
||||
formatLine(1, "%sStatus: %s", indent, s.Sweep.Connectivity.Status)
|
||||
} else {
|
||||
formatLine(1, "%sStatus: %s (%s)", indent, s.Sweep.Connectivity.Status, humanTime(since))
|
||||
}
|
||||
addBlankLine(1)
|
||||
}
|
||||
|
||||
// Queues
|
||||
if all || queues || brief {
|
||||
sectionTitle(1, "Queues")
|
||||
formatLine(1, "%sProvide queue: %s CIDs, %s regions", indent, humanSI(s.Sweep.Queues.PendingKeyProvides, 1), humanSI(s.Sweep.Queues.PendingRegionProvides, 1))
|
||||
formatLine(1, "%sReprovide queue: %s regions", indent, humanSI(s.Sweep.Queues.PendingRegionReprovides, 1))
|
||||
addBlankLine(1)
|
||||
}
|
||||
|
||||
// Schedule
|
||||
if all || schedule || brief {
|
||||
sectionTitle(0, "Schedule")
|
||||
formatLine(0, "%sCIDs scheduled: %s", indent, humanNumber(s.Sweep.Schedule.Keys))
|
||||
formatLine(0, "%sRegions scheduled: %s", indent, humanNumberOrNA(s.Sweep.Schedule.Regions))
|
||||
if !brief {
|
||||
formatLine(0, "%sAvg prefix length: %s", indent, humanFloatOrNA(s.Sweep.Schedule.AvgPrefixLength))
|
||||
nextPrefix := key.BitString(s.Sweep.Schedule.NextReprovidePrefix)
|
||||
if nextPrefix == "" {
|
||||
nextPrefix = "N/A"
|
||||
}
|
||||
formatLine(0, "%sNext region prefix: %s", indent, nextPrefix)
|
||||
nextReprovideAt := s.Sweep.Schedule.NextReprovideAt.Format("15:04:05")
|
||||
if s.Sweep.Schedule.NextReprovideAt.IsZero() {
|
||||
nextReprovideAt = "N/A"
|
||||
}
|
||||
formatLine(0, "%sNext region reprovide: %s", indent, nextReprovideAt)
|
||||
}
|
||||
addBlankLine(0)
|
||||
}
|
||||
|
||||
// Timings
|
||||
if all || timings {
|
||||
sectionTitle(1, "Timings")
|
||||
formatLine(1, "%sUptime: %s (%s)", indent, humanDuration(s.Sweep.Timing.Uptime), humanTime(time.Now().Add(-s.Sweep.Timing.Uptime)))
|
||||
formatLine(1, "%sCurrent time offset: %s", indent, humanDuration(s.Sweep.Timing.CurrentTimeOffset))
|
||||
formatLine(1, "%sCycle started: %s", indent, humanTime(s.Sweep.Timing.CycleStart))
|
||||
formatLine(1, "%sReprovide interval: %s", indent, humanDuration(s.Sweep.Timing.ReprovidesInterval))
|
||||
addBlankLine(1)
|
||||
}
|
||||
|
||||
// Network
|
||||
if all || network || brief {
|
||||
sectionTitle(0, "Network")
|
||||
formatLine(0, "%sAvg record holders: %s", indent, humanFloatOrNA(s.Sweep.Network.AvgHolders))
|
||||
if !brief {
|
||||
formatLine(0, "%sPeers swept: %s", indent, humanInt(s.Sweep.Network.Peers))
|
||||
formatLine(0, "%sFull keyspace coverage: %t", indent, s.Sweep.Network.CompleteKeyspaceCoverage)
|
||||
if s.Sweep.Network.Peers > 0 {
|
||||
formatLine(0, "%sReachable peers: %s (%s%%)", indent, humanInt(s.Sweep.Network.Reachable), humanNumber(100*s.Sweep.Network.Reachable/s.Sweep.Network.Peers))
|
||||
} else {
|
||||
formatLine(0, "%sReachable peers: %s", indent, humanInt(s.Sweep.Network.Reachable))
|
||||
}
|
||||
formatLine(0, "%sAvg region size: %s", indent, humanFloatOrNA(s.Sweep.Network.AvgRegionSize))
|
||||
formatLine(0, "%sReplication factor: %s", indent, humanNumber(s.Sweep.Network.ReplicationFactor))
|
||||
addBlankLine(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Operations
|
||||
if all || operations || brief {
|
||||
sectionTitle(1, "Operations")
|
||||
// Ongoing operations
|
||||
formatLine(1, "%sOngoing provides: %s CIDs, %s regions", indent, humanSI(s.Sweep.Operations.Ongoing.KeyProvides, 1), humanSI(s.Sweep.Operations.Ongoing.RegionProvides, 1))
|
||||
formatLine(1, "%sOngoing reprovides: %s CIDs, %s regions", indent, humanSI(s.Sweep.Operations.Ongoing.KeyReprovides, 1), humanSI(s.Sweep.Operations.Ongoing.RegionReprovides, 1))
|
||||
// Past operations summary
|
||||
formatLine(1, "%sTotal CIDs provided: %s", indent, humanNumber(s.Sweep.Operations.Past.KeysProvided))
|
||||
if !brief {
|
||||
formatLine(1, "%sTotal records provided: %s", indent, humanNumber(s.Sweep.Operations.Past.RecordsProvided))
|
||||
formatLine(1, "%sTotal provide errors: %s", indent, humanNumber(s.Sweep.Operations.Past.KeysFailed))
|
||||
formatLine(1, "%sCIDs provided/min/worker: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.KeysProvidedPerMinute))
|
||||
formatLine(1, "%sCIDs reprovided/min/worker: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.KeysReprovidedPerMinute))
|
||||
formatLine(1, "%sRegion reprovide duration: %s", indent, humanDurationOrNA(s.Sweep.Operations.Past.RegionReprovideDuration))
|
||||
formatLine(1, "%sAvg CIDs/reprovide: %s", indent, humanFloatOrNA(s.Sweep.Operations.Past.AvgKeysPerReprovide))
|
||||
formatLine(1, "%sRegions reprovided (last cycle): %s", indent, humanNumber(s.Sweep.Operations.Past.RegionReprovidedLastCycle))
|
||||
addBlankLine(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Workers
|
||||
displayWorkers := all || workers
|
||||
if displayWorkers || brief {
|
||||
availableReservedBurst := max(0, s.Sweep.Workers.DedicatedBurst-s.Sweep.Workers.ActiveBurst)
|
||||
availableReservedPeriodic := max(0, s.Sweep.Workers.DedicatedPeriodic-s.Sweep.Workers.ActivePeriodic)
|
||||
availableFreeWorkers := max(0, s.Sweep.Workers.Max-max(s.Sweep.Workers.DedicatedBurst, s.Sweep.Workers.ActiveBurst)-max(s.Sweep.Workers.DedicatedPeriodic, s.Sweep.Workers.ActivePeriodic))
|
||||
availableBurst := availableFreeWorkers + availableReservedBurst
|
||||
availablePeriodic := availableFreeWorkers + availableReservedPeriodic
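// Worked example (hypothetical numbers, not from a real node): with Max=16,
// DedicatedBurst=4, ActiveBurst=1, DedicatedPeriodic=8, ActivePeriodic=8:
//   availableReservedBurst    = max(0, 4-1)                      = 3
//   availableReservedPeriodic = max(0, 8-8)                      = 0
//   availableFreeWorkers      = max(0, 16 - max(4,1) - max(8,8)) = 4
//   availableBurst            = 4 + 3                            = 7
//   availablePeriodic         = 4 + 0                            = 4
// Neither pool is at or below lowWorkerThreshold (2), so no warning is shown.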
|
||||
|
||||
if displayWorkers || availableBurst <= lowWorkerThreshold || availablePeriodic <= lowWorkerThreshold {
|
||||
// Either we want to display workers information, or we are low on
|
||||
// available workers and want to warn the user.
|
||||
sectionTitle(0, "Workers")
|
||||
specifyWorkers := " workers"
|
||||
if compactMode {
|
||||
specifyWorkers = ""
|
||||
}
|
||||
formatLine(0, "%sActive%s: %s / %s (max)", indent, specifyWorkers, humanInt(s.Sweep.Workers.Active), humanInt(s.Sweep.Workers.Max))
|
||||
if brief {
|
||||
// Brief mode - show condensed worker info
|
||||
formatLine(0, "%sPeriodic%s: %s active, %s available, %s queued", indent, specifyWorkers,
|
||||
humanInt(s.Sweep.Workers.ActivePeriodic), humanInt(availablePeriodic), humanInt(s.Sweep.Workers.QueuedPeriodic))
|
||||
formatLine(0, "%sBurst%s: %s active, %s available, %s queued\n", indent, specifyWorkers,
|
||||
humanInt(s.Sweep.Workers.ActiveBurst), humanInt(availableBurst), humanInt(s.Sweep.Workers.QueuedBurst))
|
||||
} else {
|
||||
formatLine(0, "%sFree%s: %s", indent, specifyWorkers, humanInt(availableFreeWorkers))
|
||||
formatLine(0, "%s %-14s %-9s %s", indent, "Workers stats:", "Periodic", "Burst")
|
||||
formatLine(0, "%s %-14s %-9s %s", indent, "Active:", humanInt(s.Sweep.Workers.ActivePeriodic), humanInt(s.Sweep.Workers.ActiveBurst))
|
||||
formatLine(0, "%s %-14s %-9s %s", indent, "Dedicated:", humanInt(s.Sweep.Workers.DedicatedPeriodic), humanInt(s.Sweep.Workers.DedicatedBurst))
|
||||
formatLine(0, "%s %-14s %-9s %s", indent, "Available:", humanInt(availablePeriodic), humanInt(availableBurst))
|
||||
formatLine(0, "%s %-14s %-9s %s", indent, "Queued:", humanInt(s.Sweep.Workers.QueuedPeriodic), humanInt(s.Sweep.Workers.QueuedBurst))
|
||||
formatLine(0, "%sMax connections/worker: %s", indent, humanInt(s.Sweep.Workers.MaxProvideConnsPerWorker))
|
||||
addBlankLine(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
if compactMode {
|
||||
col0Width := col0MaxWidth + 2
|
||||
// Print both columns side by side
|
||||
maxRows := max(len(cols[0]), len(cols[1]))
|
||||
if maxRows == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := range maxRows - 1 { // last line is empty
|
||||
var left, right string
|
||||
if i < len(cols[0]) {
|
||||
left = cols[0][i]
|
||||
}
|
||||
if i < len(cols[1]) {
|
||||
right = cols[1][i]
|
||||
}
|
||||
fmt.Fprintf(wtr, "%-*s %s\n", col0Width, left, right)
|
||||
}
|
||||
} else {
|
||||
if !brief {
|
||||
cols[0] = cols[0][:len(cols[0])-1] // remove last blank line
|
||||
}
|
||||
for _, line := range cols[0] {
|
||||
fmt.Fprintln(wtr, line)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -157,10 +517,23 @@ This interface is not stable and may change from release to release.
|
||||
}
|
||||
|
||||
func humanDuration(val time.Duration) string {
|
||||
if val > time.Second {
|
||||
return val.Truncate(100 * time.Millisecond).String()
|
||||
}
|
||||
return val.Truncate(time.Microsecond).String()
|
||||
}
|
||||
|
||||
func humanDurationOrNA(val time.Duration) string {
|
||||
if val <= 0 {
|
||||
return "N/A"
|
||||
}
|
||||
return humanDuration(val)
|
||||
}
|
||||
|
||||
func humanTime(val time.Time) string {
|
||||
if val.IsZero() {
|
||||
return "N/A"
|
||||
}
|
||||
return val.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
@ -174,11 +547,49 @@ func humanNumber[T constraints.Float | constraints.Integer](n T) string {
|
||||
return str
|
||||
}
|
||||
|
||||
func humanSI(val float64, decimals int) string {
|
||||
v, unit := humanize.ComputeSI(val)
|
||||
// humanNumberOrNA is like humanNumber but returns "N/A" for non-positive values.
|
||||
func humanNumberOrNA[T constraints.Float | constraints.Integer](n T) string {
|
||||
if n <= 0 {
|
||||
return "N/A"
|
||||
}
|
||||
return humanNumber(n)
|
||||
}
|
||||
|
||||
// humanFloatOrNA formats a float with 1 decimal place, returning "N/A" for non-positive values.
|
||||
// This is separate from humanNumberOrNA because it provides simple decimal formatting for
|
||||
// continuous metrics (averages, rates) rather than SI unit formatting used for discrete counts.
|
||||
func humanFloatOrNA(val float64) string {
|
||||
if val <= 0 {
|
||||
return "N/A"
|
||||
}
|
||||
return humanFull(val, 1)
|
||||
}
|
||||
|
||||
func humanSI[T constraints.Float | constraints.Integer](val T, decimals int) string {
|
||||
v, unit := humanize.ComputeSI(float64(val))
|
||||
return fmt.Sprintf("%s%s", humanFull(v, decimals), unit)
|
||||
}
|
||||
|
||||
func humanInt[T constraints.Integer](val T) string {
|
||||
return humanFull(float64(val), 0)
|
||||
}
|
||||
|
||||
func humanFull(val float64, decimals int) string {
|
||||
return humanize.CommafWithDigits(val, decimals)
|
||||
}
|
||||
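// Illustrative outputs of the helpers above, assuming go-humanize's ComputeSI and
// CommafWithDigits behavior (example values only):
//   humanSI(12345, 1)    -> "12.3k"
//   humanInt(2500)       -> "2,500"
//   humanFloatOrNA(3.14) -> "3.1"
//   humanFloatOrNA(0)    -> "N/A"
//   humanNumberOrNA(0)   -> "N/A"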
|
||||
// provideCIDSync performs a synchronous/blocking provide operation to announce
|
||||
// the given CID to the DHT.
|
||||
//
|
||||
// - If the accelerated DHT client is used, a DHT lookup isn't needed, we
|
||||
// directly allocate provider records to closest peers.
|
||||
// - If Provide.DHT.SweepEnabled=true or OptimisticProvide=true, we make an
|
||||
// optimistic provide call.
|
||||
// - Else we make a standard provide call (much slower).
|
||||
//
|
||||
// IMPORTANT: The caller MUST verify DHT availability using HasActiveDHTClient()
|
||||
// before calling this function. Calling with a nil or invalid router will cause
|
||||
// a panic - this is the caller's responsibility to prevent.
|
||||
func provideCIDSync(ctx context.Context, router routing.Routing, c cid.Cid) error {
|
||||
return router.Provide(ctx, c, true)
|
||||
}
|
||||
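For reference, a hedged sketch of how a caller might bound the blocking call above with a timeout (the 30-second budget and variable names are illustrative; per the comment above, the caller must still verify DHT availability first, and nd.DHTClient is assumed to satisfy routing.Routing as elsewhere in this file):

	ctx, cancel := context.WithTimeout(req.Context, 30*time.Second)
	defer cancel()
	if err := provideCIDSync(ctx, nd.DHTClient, c); err != nil {
		return fmt.Errorf("provide %s: %w", c, err)
	}
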
|
||||
@ -8,26 +8,35 @@ import (
|
||||
"net/http"
|
||||
"slices"
|
||||
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
mbase "github.com/multiformats/go-multibase"
|
||||
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/query"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
options "github.com/ipfs/kubo/core/coreiface/options"
|
||||
"github.com/ipfs/kubo/core/node/libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
mbase "github.com/multiformats/go-multibase"
|
||||
)
|
||||
|
||||
var PubsubCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "An experimental publish-subscribe system on ipfs.",
|
||||
ShortDescription: `
|
||||
ipfs pubsub allows you to publish messages to a given topic, and also to
|
||||
subscribe to new messages on a given topic.
|
||||
|
||||
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
|
||||
EXPERIMENTAL FEATURE
|
||||
|
||||
It is not intended in its current state to be used in a production
|
||||
environment. To use, the daemon must be run with
|
||||
'--enable-pubsub-experiment'.
|
||||
This is an opt-in feature optimized for IPNS over PubSub
|
||||
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
|
||||
|
||||
The default message validator is designed for IPNS record protocol.
|
||||
For custom pubsub applications requiring different validation logic,
|
||||
use go-libp2p-pubsub (https://github.com/libp2p/go-libp2p-pubsub)
|
||||
directly in a dedicated binary.
|
||||
|
||||
To enable, set 'Pubsub.Enabled' config to true.
|
||||
`,
|
||||
},
|
||||
Subcommands: map[string]*cmds.Command{
|
||||
@ -35,6 +44,7 @@ DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
|
||||
"sub": PubsubSubCmd,
|
||||
"ls": PubsubLsCmd,
|
||||
"peers": PubsubPeersCmd,
|
||||
"reset": PubsubResetCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -46,17 +56,18 @@ type pubsubMessage struct {
|
||||
}
|
||||
|
||||
var PubsubSubCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Subscribe to messages on a given topic.",
|
||||
ShortDescription: `
|
||||
ipfs pubsub sub subscribes to messages on a given topic.
|
||||
|
||||
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
|
||||
EXPERIMENTAL FEATURE
|
||||
|
||||
It is not intended in its current state to be used in a production
|
||||
environment. To use, the daemon must be run with
|
||||
'--enable-pubsub-experiment'.
|
||||
This is an opt-in feature optimized for IPNS over PubSub
|
||||
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
|
||||
|
||||
To enable, set 'Pubsub.Enabled' config to true.
|
||||
|
||||
PEER ENCODING
|
||||
|
||||
@ -145,18 +156,19 @@ TOPIC AND DATA ENCODING
|
||||
}
|
||||
|
||||
var PubsubPubCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Publish data to a given pubsub topic.",
|
||||
ShortDescription: `
|
||||
ipfs pubsub pub publishes a message to a specified topic.
|
||||
It reads binary data from stdin or a file.
|
||||
|
||||
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
|
||||
EXPERIMENTAL FEATURE
|
||||
|
||||
It is not intended in its current state to be used in a production
|
||||
environment. To use, the daemon must be run with
|
||||
'--enable-pubsub-experiment'.
|
||||
This is an opt-in feature optimized for IPNS over PubSub
|
||||
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
|
||||
|
||||
To enable, set 'Pubsub.Enabled' config to true.
|
||||
|
||||
HTTP RPC ENCODING
|
||||
|
||||
@ -201,17 +213,18 @@ HTTP RPC ENCODING
|
||||
}
|
||||
|
||||
var PubsubLsCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "List subscribed topics by name.",
|
||||
ShortDescription: `
|
||||
ipfs pubsub ls lists out the names of topics you are currently subscribed to.
|
||||
|
||||
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
|
||||
EXPERIMENTAL FEATURE
|
||||
|
||||
It is not intended in its current state to be used in a production
|
||||
environment. To use, the daemon must be run with
|
||||
'--enable-pubsub-experiment'.
|
||||
This is an opt-in feature optimized for IPNS over PubSub
|
||||
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
|
||||
|
||||
To enable, set 'Pubsub.Enabled' config to true.
|
||||
|
||||
TOPIC ENCODING
|
||||
|
||||
@ -273,7 +286,7 @@ func safeTextListEncoder(req *cmds.Request, w io.Writer, list *stringList) error
|
||||
}
|
||||
|
||||
var PubsubPeersCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated,
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "List peers we are currently pubsubbing with.",
|
||||
ShortDescription: `
|
||||
@ -281,11 +294,12 @@ ipfs pubsub peers with no arguments lists out the pubsub peers you are
|
||||
currently connected to. If given a topic, it will list connected peers who are
|
||||
subscribed to the named topic.
|
||||
|
||||
DEPRECATED FEATURE (see https://github.com/ipfs/kubo/issues/9717)
|
||||
EXPERIMENTAL FEATURE
|
||||
|
||||
It is not intended in its current state to be used in a production
|
||||
environment. To use, the daemon must be run with
|
||||
'--enable-pubsub-experiment'.
|
||||
This is an opt-in feature optimized for IPNS over PubSub
|
||||
(https://specs.ipfs.tech/ipns/ipns-pubsub-router/).
|
||||
|
||||
To enable, set 'Pubsub.Enabled' config to true.
|
||||
|
||||
TOPIC AND DATA ENCODING
|
||||
|
||||
@ -367,3 +381,122 @@ func urlArgsDecoder(req *cmds.Request, env cmds.Environment) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pubsubResetResult struct {
|
||||
Deleted int64 `json:"deleted"`
|
||||
}
|
||||
|
||||
var PubsubResetCmd = &cmds.Command{
|
||||
Status: cmds.Experimental,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Reset pubsub validator state.",
|
||||
ShortDescription: `
|
||||
Clears persistent sequence number state used by the pubsub validator.
|
||||
|
||||
WARNING: FOR TESTING ONLY - DO NOT USE IN PRODUCTION
|
||||
|
||||
Resets validator state that protects against replay attacks. After reset,
|
||||
previously seen messages may be accepted again until their sequence numbers
|
||||
are re-learned.
|
||||
|
||||
Use cases:
|
||||
- Testing pubsub functionality
|
||||
- Recovery from a peer sending artificially high sequence numbers
|
||||
(which would cause subsequent messages from that peer to be rejected)
|
||||
|
||||
The --peer flag limits the reset to a specific peer's state.
|
||||
Without --peer, all validator state is cleared.
|
||||
|
||||
NOTE: This only resets the persistent seqno validator state. The in-memory
|
||||
seen messages cache (Pubsub.SeenMessagesTTL) auto-expires and can only be
|
||||
fully cleared by restarting the daemon.
|
||||
`,
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption(peerOptionName, "p", "Only reset state for this peer ID"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
n, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ds := n.Repo.Datastore()
|
||||
ctx := req.Context
|
||||
|
||||
peerOpt, _ := req.Options[peerOptionName].(string)
|
||||
|
||||
var deleted int64
|
||||
if peerOpt != "" {
|
||||
// Reset specific peer
|
||||
pid, err := peer.Decode(peerOpt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid peer ID: %w", err)
|
||||
}
|
||||
key := datastore.NewKey(libp2p.SeqnoStorePrefix + pid.String())
|
||||
exists, err := ds.Has(ctx, key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check seqno state: %w", err)
|
||||
}
|
||||
if exists {
|
||||
if err := ds.Delete(ctx, key); err != nil {
|
||||
return fmt.Errorf("failed to delete seqno state: %w", err)
|
||||
}
|
||||
deleted = 1
|
||||
}
|
||||
} else {
|
||||
// Reset all peers using batched delete for efficiency
|
||||
q := query.Query{
|
||||
Prefix: libp2p.SeqnoStorePrefix,
|
||||
KeysOnly: true,
|
||||
}
|
||||
results, err := ds.Query(ctx, q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query seqno state: %w", err)
|
||||
}
|
||||
defer results.Close()
|
||||
|
||||
batch, err := ds.Batch(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create batch: %w", err)
|
||||
}
|
||||
|
||||
for result := range results.Next() {
|
||||
if result.Error != nil {
|
||||
return fmt.Errorf("query error: %w", result.Error)
|
||||
}
|
||||
if err := batch.Delete(ctx, datastore.NewKey(result.Key)); err != nil {
|
||||
return fmt.Errorf("failed to batch delete key %s: %w", result.Key, err)
|
||||
}
|
||||
deleted++
|
||||
}
|
||||
|
||||
if err := batch.Commit(ctx); err != nil {
|
||||
return fmt.Errorf("failed to commit batch delete: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Sync to ensure deletions are persisted
|
||||
if err := ds.Sync(ctx, datastore.NewKey(libp2p.SeqnoStorePrefix)); err != nil {
|
||||
return fmt.Errorf("failed to sync datastore: %w", err)
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &pubsubResetResult{Deleted: deleted})
|
||||
},
|
||||
Type: pubsubResetResult{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result *pubsubResetResult) error {
|
||||
peerOpt, _ := req.Options[peerOptionName].(string)
|
||||
if peerOpt != "" {
|
||||
if result.Deleted == 0 {
|
||||
_, err := fmt.Fprintf(w, "No validator state found for peer %s\n", peerOpt)
|
||||
return err
|
||||
}
|
||||
_, err := fmt.Fprintf(w, "Reset validator state for peer %s\n", peerOpt)
|
||||
return err
|
||||
}
|
||||
_, err := fmt.Fprintf(w, "Reset validator state for %d peer(s)\n", result.Deleted)
|
||||
return err
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
@ -5,20 +5,22 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
oldcmds "github.com/ipfs/kubo/commands"
|
||||
cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
|
||||
coreiface "github.com/ipfs/kubo/core/coreiface"
|
||||
corerepo "github.com/ipfs/kubo/core/corerepo"
|
||||
fsrepo "github.com/ipfs/kubo/repo/fsrepo"
|
||||
"github.com/ipfs/kubo/repo/fsrepo/migrations"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
bstore "github.com/ipfs/boxo/blockstore"
|
||||
"github.com/ipfs/boxo/path"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
)
|
||||
@ -226,45 +228,137 @@ Version string The repo version.
|
||||
},
|
||||
}
|
||||
|
||||
// VerifyProgress reports verification progress to the user.
|
||||
// It contains either a message about a corrupt block or a progress counter.
|
||||
type VerifyProgress struct {
|
||||
Msg string
|
||||
Progress int
|
||||
Msg string // Message about a corrupt/healed block (empty for valid blocks)
|
||||
Progress int // Number of blocks processed so far
|
||||
}
|
||||
|
||||
func verifyWorkerRun(ctx context.Context, wg *sync.WaitGroup, keys <-chan cid.Cid, results chan<- string, bs bstore.Blockstore) {
|
||||
// verifyState represents the state of a block after verification.
|
||||
// States track both the verification result and any remediation actions taken.
|
||||
type verifyState int
|
||||
|
||||
const (
|
||||
verifyStateValid verifyState = iota // Block is valid and uncorrupted
|
||||
verifyStateCorrupt // Block is corrupt, no action taken
|
||||
verifyStateCorruptRemoved // Block was corrupt and successfully removed
|
||||
verifyStateCorruptRemoveFailed // Block was corrupt but removal failed
|
||||
verifyStateCorruptHealed // Block was corrupt, removed, and successfully re-fetched
|
||||
verifyStateCorruptHealFailed // Block was corrupt and removed, but re-fetching failed
|
||||
)
|
||||
|
||||
const (
|
||||
// verifyWorkerMultiplier determines worker pool size relative to CPU count.
|
||||
// Since block verification is I/O-bound (disk reads + potential network fetches),
|
||||
// we use more workers than CPU cores to maximize throughput.
|
||||
verifyWorkerMultiplier = 2
|
||||
)
|
||||
|
||||
// verifyResult contains the outcome of verifying a single block.
|
||||
// It includes the block's CID, its verification state, and an optional
|
||||
// human-readable message describing what happened.
|
||||
type verifyResult struct {
|
||||
cid cid.Cid // CID of the block that was verified
|
||||
state verifyState // Final state after verification and any remediation
|
||||
msg string // Human-readable message (empty for valid blocks)
|
||||
}
|
||||
|
||||
// verifyWorkerRun processes CIDs from the keys channel, verifying their integrity.
|
||||
// If shouldDrop is true, corrupt blocks are removed from the blockstore.
|
||||
// If shouldHeal is true (implies shouldDrop), removed blocks are re-fetched from the network.
|
||||
// The api parameter must be non-nil when shouldHeal is true.
|
||||
// healTimeout specifies the maximum time to wait for each block heal (0 = no timeout).
|
||||
func verifyWorkerRun(ctx context.Context, wg *sync.WaitGroup, keys <-chan cid.Cid, results chan<- *verifyResult, bs bstore.Blockstore, api coreiface.CoreAPI, shouldDrop, shouldHeal bool, healTimeout time.Duration) {
|
||||
defer wg.Done()
|
||||
|
||||
sendResult := func(r *verifyResult) bool {
|
||||
select {
|
||||
case results <- r:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for k := range keys {
|
||||
_, err := bs.Get(ctx, k)
|
||||
if err != nil {
|
||||
select {
|
||||
case results <- fmt.Sprintf("block %s was corrupt (%s)", k, err):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
// Block is corrupt
|
||||
result := &verifyResult{cid: k, state: verifyStateCorrupt}
|
||||
|
||||
if !shouldDrop {
|
||||
result.msg = fmt.Sprintf("block %s was corrupt (%s)", k, err)
|
||||
if !sendResult(result) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to delete
|
||||
if delErr := bs.DeleteBlock(ctx, k); delErr != nil {
|
||||
result.state = verifyStateCorruptRemoveFailed
|
||||
result.msg = fmt.Sprintf("block %s was corrupt (%s), failed to remove (%s)", k, err, delErr)
|
||||
if !sendResult(result) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !shouldHeal {
|
||||
result.state = verifyStateCorruptRemoved
|
||||
result.msg = fmt.Sprintf("block %s was corrupt (%s), removed", k, err)
|
||||
if !sendResult(result) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to heal by re-fetching from network (api is guaranteed non-nil here)
|
||||
healCtx := ctx
|
||||
var healCancel context.CancelFunc
|
||||
if healTimeout > 0 {
|
||||
healCtx, healCancel = context.WithTimeout(ctx, healTimeout)
|
||||
}
|
||||
|
||||
if _, healErr := api.Block().Get(healCtx, path.FromCid(k)); healErr != nil {
|
||||
result.state = verifyStateCorruptHealFailed
|
||||
result.msg = fmt.Sprintf("block %s was corrupt (%s), removed, failed to heal (%s)", k, err, healErr)
|
||||
} else {
|
||||
result.state = verifyStateCorruptHealed
|
||||
result.msg = fmt.Sprintf("block %s was corrupt (%s), removed, healed", k, err)
|
||||
}
|
||||
|
||||
if healCancel != nil {
|
||||
healCancel()
|
||||
}
|
||||
|
||||
if !sendResult(result) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case results <- "":
|
||||
case <-ctx.Done():
|
||||
// Block is valid
|
||||
if !sendResult(&verifyResult{cid: k, state: verifyStateValid}) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blockstore) <-chan string {
|
||||
results := make(chan string)
|
||||
// verifyResultChan creates a channel of verification results by spawning multiple worker goroutines
|
||||
// to process blocks in parallel. It returns immediately with a channel that will receive results.
|
||||
func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blockstore, api coreiface.CoreAPI, shouldDrop, shouldHeal bool, healTimeout time.Duration) <-chan *verifyResult {
|
||||
results := make(chan *verifyResult)
|
||||
|
||||
go func() {
|
||||
defer close(results)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < runtime.NumCPU()*2; i++ {
|
||||
for i := 0; i < runtime.NumCPU()*verifyWorkerMultiplier; i++ {
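// e.g. on an 8-core machine this starts 16 workers (NumCPU() * verifyWorkerMultiplier);
// the pool is deliberately larger than the CPU count because each worker mostly
// waits on disk reads or network fetches.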
|
||||
wg.Add(1)
|
||||
go verifyWorkerRun(ctx, &wg, keys, results, bs)
|
||||
go verifyWorkerRun(ctx, &wg, keys, results, bs, api, shouldDrop, shouldHeal, healTimeout)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
@ -276,6 +370,45 @@ func verifyResultChan(ctx context.Context, keys <-chan cid.Cid, bs bstore.Blocks
|
||||
var repoVerifyCmd = &cmds.Command{
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Verify all blocks in repo are not corrupted.",
|
||||
ShortDescription: `
|
||||
'ipfs repo verify' checks integrity of all blocks in the local datastore.
|
||||
Each block is read and validated against its CID to ensure data integrity.
|
||||
|
||||
Without any flags, this is a SAFE, read-only check that only reports corrupt
|
||||
blocks without modifying the repository. This can be used as a "dry run" to
|
||||
preview what --drop or --heal would do.
|
||||
|
||||
Use --drop to remove corrupt blocks, or --heal to remove and re-fetch from
|
||||
the network.
|
||||
|
||||
Examples:
|
||||
ipfs repo verify # safe read-only check, reports corrupt blocks
|
||||
ipfs repo verify --drop # remove corrupt blocks
|
||||
ipfs repo verify --heal # remove and re-fetch corrupt blocks
|
||||
|
||||
Exit Codes:
|
||||
0: All blocks are valid, OR all corrupt blocks were successfully remediated
|
||||
(with --drop or --heal)
|
||||
1: Corrupt blocks detected (without flags), OR remediation failed (block
|
||||
removal or healing failed with --drop or --heal)
|
||||
|
||||
Note: --heal requires the daemon to be running in online mode with network
|
||||
connectivity to nodes that have the missing blocks. Make sure the daemon is
|
||||
online and connected to other peers. Healing will attempt to re-fetch each
|
||||
corrupt block from the network after removing it. If a block cannot be found
|
||||
on the network, it will remain deleted.
|
||||
|
||||
WARNING: Both --drop and --heal are DESTRUCTIVE operations that permanently
|
||||
delete corrupt blocks from your repository. Once deleted, blocks cannot be
|
||||
recovered unless --heal successfully fetches them from the network. Blocks
|
||||
that cannot be healed will remain permanently deleted. Always backup your
|
||||
repository before using these options.
|
||||
`,
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption("drop", "Remove corrupt blocks from datastore (destructive operation)."),
|
||||
cmds.BoolOption("heal", "Remove corrupt blocks and re-fetch from network (destructive operation, implies --drop)."),
|
||||
cmds.StringOption("heal-timeout", "Maximum time to wait for each block heal (e.g., \"30s\"). Only applies with --heal.").WithDefault("30s"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
nd, err := cmdenv.GetNode(env)
|
||||
@ -283,6 +416,38 @@ var repoVerifyCmd = &cmds.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
drop, _ := req.Options["drop"].(bool)
|
||||
heal, _ := req.Options["heal"].(bool)
|
||||
|
||||
if heal {
|
||||
drop = true // heal implies drop
|
||||
}
|
||||
|
||||
// Parse and validate heal-timeout
|
||||
timeoutStr, _ := req.Options["heal-timeout"].(string)
|
||||
healTimeout, err := time.ParseDuration(timeoutStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid heal-timeout: %w", err)
|
||||
}
|
||||
if healTimeout < 0 {
|
||||
return errors.New("heal-timeout must be >= 0")
|
||||
}
|
||||
|
||||
// Check online mode and API availability for healing operation
|
||||
var api coreiface.CoreAPI
|
||||
if heal {
|
||||
if !nd.IsOnline {
|
||||
return ErrNotOnline
|
||||
}
|
||||
api, err = cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if api == nil {
|
||||
return fmt.Errorf("healing requested but API is not available - make sure daemon is online and connected to other peers")
|
||||
}
|
||||
}
|
||||
|
||||
bs := &bstore.ValidatingBlockstore{Blockstore: bstore.NewBlockstore(nd.Repo.Datastore())}
|
||||
|
||||
keys, err := bs.AllKeysChan(req.Context)
|
||||
@ -291,17 +456,47 @@ var repoVerifyCmd = &cmds.Command{
|
||||
return err
|
||||
}
		results := verifyResultChan(req.Context, keys, bs)
		results := verifyResultChan(req.Context, keys, bs, api, drop, heal, healTimeout)

		var fails int
		// Track statistics for each type of outcome
		var corrupted, removed, removeFailed, healed, healFailed int
		var i int
		for msg := range results {
			if msg != "" {
				if err := res.Emit(&VerifyProgress{Msg: msg}); err != nil {

		for result := range results {
			// Update counters based on the block's final state
			switch result.state {
			case verifyStateCorrupt:
				// Block is corrupt but no action was taken (--drop not specified)
				corrupted++
			case verifyStateCorruptRemoved:
				// Block was corrupt and successfully removed (--drop specified)
				corrupted++
				removed++
			case verifyStateCorruptRemoveFailed:
				// Block was corrupt but couldn't be removed
				corrupted++
				removeFailed++
			case verifyStateCorruptHealed:
				// Block was corrupt, removed, and successfully re-fetched (--heal specified)
				corrupted++
				removed++
				healed++
			case verifyStateCorruptHealFailed:
				// Block was corrupt and removed, but re-fetching failed
				corrupted++
				removed++
				healFailed++
			default:
				// verifyStateValid blocks are not counted (they're the expected case)
			}

			// Emit progress message for corrupt blocks
			if result.state != verifyStateValid && result.msg != "" {
				if err := res.Emit(&VerifyProgress{Msg: result.msg}); err != nil {
					return err
				}
				fails++
			}

			i++
			if err := res.Emit(&VerifyProgress{Progress: i}); err != nil {
				return err
@@ -312,8 +507,42 @@ var repoVerifyCmd = &cmds.Command{
			return err
		}

		if fails != 0 {
			return errors.New("verify complete, some blocks were corrupt")
		if corrupted > 0 {
			// Build a summary of what happened with corrupt blocks
			summary := fmt.Sprintf("verify complete, %d blocks corrupt", corrupted)
			if removed > 0 {
				summary += fmt.Sprintf(", %d removed", removed)
			}
			if removeFailed > 0 {
				summary += fmt.Sprintf(", %d failed to remove", removeFailed)
			}
			if healed > 0 {
				summary += fmt.Sprintf(", %d healed", healed)
			}
			if healFailed > 0 {
				summary += fmt.Sprintf(", %d failed to heal", healFailed)
			}

			// Determine success/failure based on operation mode
			shouldFail := false

			if !drop {
				// Detection-only mode: always fail if corruption found
				shouldFail = true
			} else if heal {
				// Heal mode: fail if any removal or heal failed
				shouldFail = (removeFailed > 0 || healFailed > 0)
			} else {
				// Drop mode: fail if any removal failed
				shouldFail = (removeFailed > 0)
			}

			if shouldFail {
				return errors.New(summary)
			}

			// Success: emit summary as a message instead of error
			return res.Emit(&VerifyProgress{Msg: summary})
		}

		return res.Emit(&VerifyProgress{Msg: "verify complete, all blocks validated."})
@@ -322,7 +551,7 @@ var repoVerifyCmd = &cmds.Command{
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, obj *VerifyProgress) error {
			if strings.Contains(obj.Msg, "was corrupt") {
				fmt.Fprintln(os.Stdout, obj.Msg)
				fmt.Fprintln(w, obj.Msg)
				return nil
			}
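The hunks above wire --heal and --heal-timeout into the command, but the worker that performs the per-block heal (verifyWorkerRun / verifyResultChan) is not part of this excerpt. As a rough sketch only, assuming the CoreAPI plumbing shown above — the helper name healBlock and its exact signature are assumptions, not code from this changeset — a per-block heal bounded by the timeout could be structured like this:

// Hypothetical sketch, not part of this diff.
package commands

import (
	"context"
	"fmt"
	"time"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
	coreiface "github.com/ipfs/kubo/core/coreiface"
)

// healBlock tries to re-fetch a corrupt block from the network after it has
// been removed, bounding the attempt with healTimeout (0 means no per-block
// timeout, matching the "zero timeout" behaviour exercised by the tests below).
func healBlock(ctx context.Context, api coreiface.CoreAPI, c cid.Cid, healTimeout time.Duration) error {
	if healTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, healTimeout)
		defer cancel()
	}
	// Reading the block through the CoreAPI should make the block service
	// fetch it from connected peers and write it back to the local blockstore.
	if _, err := api.Block().Get(ctx, path.FromCid(c)); err != nil {
		return fmt.Errorf("failed to heal %s: %w", c, err)
	}
	return nil
}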
core/commands/repo_verify_test.go (new file, 371 lines)
@@ -0,0 +1,371 @@
//go:build go1.25

package commands

// This file contains unit tests for the --heal-timeout flag functionality
// using testing/synctest to avoid waiting for real timeouts.
//
// End-to-end tests for the full 'ipfs repo verify' command (including --drop
// and --heal flags) are located in test/cli/repo_verify_test.go.

import (
	"bytes"
	"context"
	"errors"
	"io"
	"sync"
	"testing"
	"testing/synctest"
	"time"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	coreiface "github.com/ipfs/kubo/core/coreiface"
	"github.com/ipfs/kubo/core/coreiface/options"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/ipfs/boxo/path"
)
func TestVerifyWorkerHealTimeout(t *testing.T) {
	t.Run("heal succeeds before timeout", func(t *testing.T) {
		synctest.Test(t, func(t *testing.T) {
			const healTimeout = 5 * time.Second
			testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")

			// Setup channels
			keys := make(chan cid.Cid, 1)
			keys <- testCID
			close(keys)
			results := make(chan *verifyResult, 1)

			// Mock blockstore that returns error (simulating corruption)
			mockBS := &mockBlockstore{
				getError: errors.New("corrupt block"),
			}

			// Mock API where Block().Get() completes before timeout
			mockAPI := &mockCoreAPI{
				blockAPI: &mockBlockAPI{
					getDelay: 2 * time.Second, // Less than healTimeout
					data:     []byte("healed data"),
				},
			}

			var wg sync.WaitGroup
			wg.Add(1)

			// Run worker
			go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)

			// Advance time past the mock delay but before timeout
			time.Sleep(3 * time.Second)
			synctest.Wait()

			wg.Wait()
			close(results)

			// Verify heal succeeded
			result := <-results
			require.NotNil(t, result)
			assert.Equal(t, verifyStateCorruptHealed, result.state)
			assert.Contains(t, result.msg, "healed")
		})
	})

	t.Run("heal fails due to timeout", func(t *testing.T) {
		synctest.Test(t, func(t *testing.T) {
			const healTimeout = 2 * time.Second
			testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")

			// Setup channels
			keys := make(chan cid.Cid, 1)
			keys <- testCID
			close(keys)
			results := make(chan *verifyResult, 1)

			// Mock blockstore that returns error (simulating corruption)
			mockBS := &mockBlockstore{
				getError: errors.New("corrupt block"),
			}

			// Mock API where Block().Get() takes longer than healTimeout
			mockAPI := &mockCoreAPI{
				blockAPI: &mockBlockAPI{
					getDelay: 5 * time.Second, // More than healTimeout
					data:     []byte("healed data"),
				},
			}

			var wg sync.WaitGroup
			wg.Add(1)

			// Run worker
			go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)

			// Advance time past timeout
			time.Sleep(3 * time.Second)
			synctest.Wait()

			wg.Wait()
			close(results)

			// Verify heal failed due to timeout
			result := <-results
			require.NotNil(t, result)
			assert.Equal(t, verifyStateCorruptHealFailed, result.state)
			assert.Contains(t, result.msg, "failed to heal")
			assert.Contains(t, result.msg, "context deadline exceeded")
		})
	})

	t.Run("heal with zero timeout still attempts heal", func(t *testing.T) {
		synctest.Test(t, func(t *testing.T) {
			const healTimeout = 0 // Zero timeout means no timeout
			testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")

			// Setup channels
			keys := make(chan cid.Cid, 1)
			keys <- testCID
			close(keys)
			results := make(chan *verifyResult, 1)

			// Mock blockstore that returns error (simulating corruption)
			mockBS := &mockBlockstore{
				getError: errors.New("corrupt block"),
			}

			// Mock API that succeeds quickly
			mockAPI := &mockCoreAPI{
				blockAPI: &mockBlockAPI{
					getDelay: 100 * time.Millisecond,
					data:     []byte("healed data"),
				},
			}

			var wg sync.WaitGroup
			wg.Add(1)

			// Run worker
			go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)

			// Advance time to let heal complete
			time.Sleep(200 * time.Millisecond)
			synctest.Wait()

			wg.Wait()
			close(results)

			// Verify heal succeeded even with zero timeout
			result := <-results
			require.NotNil(t, result)
			assert.Equal(t, verifyStateCorruptHealed, result.state)
			assert.Contains(t, result.msg, "healed")
		})
	})
t.Run("multiple blocks with different timeout outcomes", func(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
const healTimeout = 3 * time.Second
|
||||
testCID1 := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
|
||||
testCID2 := cid.MustParse("bafybeihvvulpp4evxj7x7armbqcyg6uezzuig6jp3lktpbovlqfkjtgyby")
|
||||
|
||||
// Setup channels
|
||||
keys := make(chan cid.Cid, 2)
|
||||
keys <- testCID1
|
||||
keys <- testCID2
|
||||
close(keys)
|
||||
results := make(chan *verifyResult, 2)
|
||||
|
||||
// Mock blockstore that always returns error (all blocks corrupt)
|
||||
mockBS := &mockBlockstore{
|
||||
getError: errors.New("corrupt block"),
|
||||
}
|
||||
|
||||
// Create two mock block APIs with different delays
|
||||
// We'll need to alternate which one gets used
|
||||
// For simplicity, use one that succeeds fast
|
||||
mockAPI := &mockCoreAPI{
|
||||
blockAPI: &mockBlockAPI{
|
||||
getDelay: 1 * time.Second, // Less than healTimeout - will succeed
|
||||
data: []byte("healed data"),
|
||||
},
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2) // Two workers
|
||||
|
||||
// Run two workers
|
||||
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
|
||||
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, true, true, healTimeout)
|
||||
|
||||
// Advance time to let both complete
|
||||
time.Sleep(2 * time.Second)
|
||||
synctest.Wait()
|
||||
|
||||
wg.Wait()
|
||||
close(results)
|
||||
|
||||
// Collect results
|
||||
var healedCount int
|
||||
for result := range results {
|
||||
if result.state == verifyStateCorruptHealed {
|
||||
healedCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Both should heal successfully (both under timeout)
|
||||
assert.Equal(t, 2, healedCount)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("valid block is not healed", func(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
const healTimeout = 5 * time.Second
|
||||
testCID := cid.MustParse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")
|
||||
|
||||
// Setup channels
|
||||
keys := make(chan cid.Cid, 1)
|
||||
keys <- testCID
|
||||
close(keys)
|
||||
results := make(chan *verifyResult, 1)
|
||||
|
||||
// Mock blockstore that returns valid block (no error)
|
||||
mockBS := &mockBlockstore{
|
||||
block: blocks.NewBlock([]byte("valid data")),
|
||||
}
|
||||
|
||||
// Mock API (won't be called since block is valid)
|
||||
mockAPI := &mockCoreAPI{
|
||||
blockAPI: &mockBlockAPI{},
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
// Run worker with heal enabled
|
||||
go verifyWorkerRun(t.Context(), &wg, keys, results, mockBS, mockAPI, false, true, healTimeout)
|
||||
|
||||
synctest.Wait()
|
||||
|
||||
wg.Wait()
|
||||
close(results)
|
||||
|
||||
// Verify block is marked valid, not healed
|
||||
result := <-results
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, verifyStateValid, result.state)
|
||||
assert.Empty(t, result.msg)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// mockBlockstore implements a minimal blockstore for testing
type mockBlockstore struct {
	getError error
	block    blocks.Block
}

func (m *mockBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
	if m.getError != nil {
		return nil, m.getError
	}
	return m.block, nil
}

func (m *mockBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error {
	return nil
}

func (m *mockBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
	return m.block != nil, nil
}

func (m *mockBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
	if m.block != nil {
		return len(m.block.RawData()), nil
	}
	return 0, errors.New("block not found")
}

func (m *mockBlockstore) Put(ctx context.Context, b blocks.Block) error {
	return nil
}

func (m *mockBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {
	return nil
}

func (m *mockBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return nil, errors.New("not implemented")
}

func (m *mockBlockstore) HashOnRead(enabled bool) {
}

// mockBlockAPI implements BlockAPI for testing
type mockBlockAPI struct {
	getDelay time.Duration
	getError error
	data     []byte
}

func (m *mockBlockAPI) Get(ctx context.Context, p path.Path) (io.Reader, error) {
	if m.getDelay > 0 {
		select {
		case <-time.After(m.getDelay):
			// Delay completed
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	if m.getError != nil {
		return nil, m.getError
	}
	return bytes.NewReader(m.data), nil
}

func (m *mockBlockAPI) Put(ctx context.Context, r io.Reader, opts ...options.BlockPutOption) (coreiface.BlockStat, error) {
	return nil, errors.New("not implemented")
}

func (m *mockBlockAPI) Rm(ctx context.Context, p path.Path, opts ...options.BlockRmOption) error {
	return errors.New("not implemented")
}

func (m *mockBlockAPI) Stat(ctx context.Context, p path.Path) (coreiface.BlockStat, error) {
	return nil, errors.New("not implemented")
}

// mockCoreAPI implements minimal CoreAPI for testing
type mockCoreAPI struct {
	blockAPI *mockBlockAPI
}

func (m *mockCoreAPI) Block() coreiface.BlockAPI {
	return m.blockAPI
}

func (m *mockCoreAPI) Unixfs() coreiface.UnixfsAPI   { return nil }
func (m *mockCoreAPI) Dag() coreiface.APIDagService  { return nil }
func (m *mockCoreAPI) Name() coreiface.NameAPI       { return nil }
func (m *mockCoreAPI) Key() coreiface.KeyAPI         { return nil }
func (m *mockCoreAPI) Pin() coreiface.PinAPI         { return nil }
func (m *mockCoreAPI) Object() coreiface.ObjectAPI   { return nil }
func (m *mockCoreAPI) Swarm() coreiface.SwarmAPI     { return nil }
func (m *mockCoreAPI) PubSub() coreiface.PubSubAPI   { return nil }
func (m *mockCoreAPI) Routing() coreiface.RoutingAPI { return nil }

func (m *mockCoreAPI) ResolvePath(ctx context.Context, p path.Path) (path.ImmutablePath, []string, error) {
	return path.ImmutablePath{}, nil, errors.New("not implemented")
}

func (m *mockCoreAPI) ResolveNode(ctx context.Context, p path.Path) (ipld.Node, error) {
	return nil, errors.New("not implemented")
}

func (m *mockCoreAPI) WithOptions(...options.ApiOption) (coreiface.CoreAPI, error) {
	return nil, errors.New("not implemented")
}
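These tests lean on Go 1.25's standard testing/synctest package to fast-forward a fake clock instead of sleeping in real time. A minimal, standalone illustration of that pattern (independent of kubo's types; the package and test names here are arbitrary):

package synctestdemo

import (
	"testing"
	"testing/synctest"
	"time"
)

// TestFakeClock shows the pattern used above: inside synctest.Test, time.Sleep
// advances a fake clock instantly once all goroutines in the bubble are
// blocked, and synctest.Wait blocks until every other goroutine is durably
// blocked or finished.
func TestFakeClock(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		done := make(chan struct{})
		go func() {
			time.Sleep(5 * time.Second) // completes instantly on the fake clock
			close(done)
		}()
		time.Sleep(6 * time.Second) // advances fake time past the goroutine's sleep
		synctest.Wait()
		select {
		case <-done:
			// expected: the goroutine finished within fake time
		default:
			t.Fatal("goroutine did not finish")
		}
	})
}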
@@ -11,11 +11,13 @@ import (

	"github.com/ipfs/kubo/config"
	cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
	"github.com/ipfs/kubo/core/commands/cmdutils"
	"github.com/ipfs/kubo/core/node"
	mh "github.com/multiformats/go-multihash"

	dag "github.com/ipfs/boxo/ipld/merkledag"
	"github.com/ipfs/boxo/ipns"
	"github.com/ipfs/boxo/provider"
	cid "github.com/ipfs/go-cid"
	cmds "github.com/ipfs/go-ipfs-cmds"
	ipld "github.com/ipfs/go-ipld-format"
@@ -89,7 +91,7 @@ var findProvidersRoutingCmd = &cmds.Command{
		defer cancel()
		pchan := n.Routing.FindProvidersAsync(ctx, c, numProviders)
		for p := range pchan {
			np := p
			np := cmdutils.CloneAddrInfo(p)
			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
				Type:      routing.Provider,
				Responses: []*peer.AddrInfo{&np},
@@ -211,6 +213,10 @@ var provideRefRoutingCmd = &cmds.Command{
		ctx, events := routing.RegisterForQueryEvents(ctx)

		var provideErr error
		// TODO: not sure if necessary to call StartProviding for `ipfs routing
		// provide <cid>`, since either cid is already being provided, or it will
		// be garbage collected and not reprovided anyway. So we may simply stick
		// with a single (optimistic) provide, and skip StartProviding call.
		go func() {
			defer cancel()
			if rec {
@@ -226,6 +232,16 @@ var provideRefRoutingCmd = &cmds.Command{
			}
		}()

		if nd.HasActiveDHTClient() {
			// If node has a DHT client, provide immediately the supplied cids before
			// returning.
			for _, c := range cids {
				if err = provideCIDSync(req.Context, nd.DHTClient, c); err != nil {
					return fmt.Errorf("error providing cid: %w", err)
				}
			}
		}

		for e := range events {
			if err := res.Emit(e); err != nil {
				return err
@@ -281,9 +297,9 @@ Trigger reprovider to announce our data to network.
		if cfg.Provide.DHT.Interval.WithDefault(config.DefaultProvideDHTInterval) == 0 {
			return errors.New("invalid configuration: Provide.DHT.Interval is set to '0'")
		}
		provideSys, ok := nd.Provider.(*node.LegacyProvider)
		provideSys, ok := nd.Provider.(provider.Reprovider)
		if !ok {
			return errors.New("manual reprovide not available with experimental sweeping provider (Provide.DHT.SweepEnabled=true)")
			return errors.New("manual reprovide only available with legacy provider (Provide.DHT.SweepEnabled=false)")
		}

		err = provideSys.Reprovide(req.Context)
@@ -300,6 +316,7 @@ func provideCids(prov node.DHTProvider, cids []cid.Cid) error {
	for i, c := range cids {
		mhs[i] = c.Hash()
	}
	// providing happens asynchronously
	return prov.StartProviding(true, mhs...)
}
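The new hunk above calls provideCIDSync to announce each CID synchronously before returning, but that helper itself is not shown in this excerpt. Purely as an illustrative sketch, the body below is an assumption inferred from the call site and the libp2p routing interface, not code from this changeset:

// Hypothetical sketch of a synchronous provide helper; the real helper in this
// changeset is not shown in the hunks above.
package commands

import (
	"context"

	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p/core/routing"
)

// provideCIDSync announces a single CID on the routing system (e.g. the Amino
// DHT) and blocks until the announcement completes or ctx is cancelled.
func provideCIDSync(ctx context.Context, r routing.Routing, c cid.Cid) error {
	// Provide with announce=true publishes provider records for c.
	return r.Provide(ctx, c, true)
}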
@@ -75,7 +75,8 @@ This interface is not stable and may change from release to release.
	var dht *dht.IpfsDHT

	var separateClient bool
	if nd.DHTClient != nd.DHT {
	// Check if using separate DHT client (e.g., accelerated DHT)
	if nd.HasActiveDHTClient() && nd.DHTClient != nd.DHT {
		separateClient = true
	}
@@ -435,7 +435,7 @@ type connInfo struct {
	Muxer     string         `json:",omitempty"`
	Direction inet.Direction `json:",omitempty"`
	Streams   []streamInfo   `json:",omitempty"`
	Identify  IdOutput       `json:",omitempty"`
	Identify  IdOutput
}

func (ci *connInfo) Sort() {
@@ -513,8 +513,9 @@ var swarmAddrsCmd = &cmds.Command{
`,
	},
	Subcommands: map[string]*cmds.Command{
		"local":  swarmAddrsLocalCmd,
		"listen": swarmAddrsListenCmd,
		"autonat": swarmAddrsAutoNATCmd,
		"local":   swarmAddrsLocalCmd,
		"listen":  swarmAddrsListenCmd,
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		api, err := cmdenv.GetApi(env, req)
core/commands/swarm_addrs_autonat.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package commands

import (
	"fmt"
	"io"

	cmds "github.com/ipfs/go-ipfs-cmds"
	cmdenv "github.com/ipfs/kubo/core/commands/cmdenv"
	"github.com/libp2p/go-libp2p/core/network"
	ma "github.com/multiformats/go-multiaddr"
)

// reachabilityHost provides access to the AutoNAT reachability status.
type reachabilityHost interface {
	Reachability() network.Reachability
}

// confirmedAddrsHost provides access to per-address reachability from AutoNAT V2.
type confirmedAddrsHost interface {
	ConfirmedAddrs() (reachable, unreachable, unknown []ma.Multiaddr)
}

// autoNATResult represents the AutoNAT reachability information.
type autoNATResult struct {
	Reachability string   `json:"reachability"`
	Reachable    []string `json:"reachable,omitempty"`
	Unreachable  []string `json:"unreachable,omitempty"`
	Unknown      []string `json:"unknown,omitempty"`
}

func multiaddrsToStrings(addrs []ma.Multiaddr) []string {
	out := make([]string, len(addrs))
	for i, a := range addrs {
		out[i] = a.String()
	}
	return out
}

func writeAddrSection(w io.Writer, label string, addrs []string) {
	if len(addrs) > 0 {
		fmt.Fprintf(w, " %s:\n", label)
		for _, addr := range addrs {
			fmt.Fprintf(w, " %s\n", addr)
		}
	}
}

var swarmAddrsAutoNATCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Show address reachability as determined by AutoNAT V2.",
		ShortDescription: `
'ipfs swarm addrs autonat' shows the reachability status of your node's
addresses as determined by AutoNAT V2.
`,
		LongDescription: `
'ipfs swarm addrs autonat' shows the reachability status of your node's
addresses as verified by AutoNAT V2.

AutoNAT V2 probes your node's addresses to determine if they are reachable
from the public internet. This helps understand whether other peers can
dial your node directly.

The output shows:
- Reachability: Overall status (Public, Private, or Unknown)
- Reachable: Addresses confirmed to be publicly reachable
- Unreachable: Addresses that failed reachability checks
- Unknown: Addresses that haven't been tested yet

For more information on AutoNAT V2, see:
https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md

Example:

  > ipfs swarm addrs autonat
  AutoNAT V2 Status:
    Reachability: Public

  Per-Address Reachability:
    Reachable:
      /ip4/203.0.113.42/tcp/4001
      /ip4/203.0.113.42/udp/4001/quic-v1
    Unreachable:
      /ip6/2001:db8::1/tcp/4001
    Unknown:
      /ip4/203.0.113.42/udp/4001/webrtc-direct
`,
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		nd, err := cmdenv.GetNode(env)
		if err != nil {
			return err
		}

		if !nd.IsOnline {
			return ErrNotOnline
		}

		result := autoNATResult{
			Reachability: network.ReachabilityUnknown.String(),
		}

		// Get per-address reachability from AutoNAT V2.
		// The host embeds *BasicHost (closableBasicHost, closableRoutedHost)
		// which implements ConfirmedAddrs.
		if h, ok := nd.PeerHost.(confirmedAddrsHost); ok {
			reachable, unreachable, unknown := h.ConfirmedAddrs()
			result.Reachable = multiaddrsToStrings(reachable)
			result.Unreachable = multiaddrsToStrings(unreachable)
			result.Unknown = multiaddrsToStrings(unknown)
		}

		// Get overall reachability status.
		if h, ok := nd.PeerHost.(reachabilityHost); ok {
			result.Reachability = h.Reachability().String()
		}

		return cmds.EmitOnce(res, result)
	},
	Type: autoNATResult{},
	Encoders: cmds.EncoderMap{
		cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, result autoNATResult) error {
			fmt.Fprintln(w, "AutoNAT V2 Status:")
			fmt.Fprintf(w, " Reachability: %s\n", result.Reachability)

			fmt.Fprintln(w)
			fmt.Fprintln(w, "Per-Address Reachability:")

			writeAddrSection(w, "Reachable", result.Reachable)
			writeAddrSection(w, "Unreachable", result.Unreachable)
			writeAddrSection(w, "Unknown", result.Unknown)

			if len(result.Reachable) == 0 && len(result.Unreachable) == 0 && len(result.Unknown) == 0 {
				fmt.Fprintln(w, " (no address reachability data available)")
			}

			return nil
		}),
	},
}
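For reference, the JSON shape that the autoNATResult struct above produces (e.g. when a client asks for a non-text encoding instead of the text encoder shown) can be previewed with a small standalone snippet; the struct definition is copied from the new file, while the sample addresses are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

// Copy of the autoNATResult shape from swarm_addrs_autonat.go, reproduced here
// only to preview its JSON encoding; the sample values are placeholders.
type autoNATResult struct {
	Reachability string   `json:"reachability"`
	Reachable    []string `json:"reachable,omitempty"`
	Unreachable  []string `json:"unreachable,omitempty"`
	Unknown      []string `json:"unknown,omitempty"`
}

func main() {
	out, _ := json.MarshalIndent(autoNATResult{
		Reachability: "Public",
		Reachable:    []string{"/ip4/203.0.113.42/tcp/4001"},
		Unknown:      []string{"/ip4/203.0.113.42/udp/4001/webrtc-direct"},
	}, "", "  ")
	// Fields with omitempty (here: unreachable) are dropped when empty.
	fmt.Println(string(out))
}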
@@ -34,8 +34,8 @@ Prints out information about your computer to aid in easier debugging.
	},
}

func getInfo(nd *core.IpfsNode) (map[string]interface{}, error) {
	info := make(map[string]interface{})
func getInfo(nd *core.IpfsNode) (map[string]any, error) {
	info := make(map[string]any)
	err := runtimeInfo(info)
	if err != nil {
		return nil, err
@@ -66,8 +66,8 @@ func getInfo(nd *core.IpfsNode) (map[string]interface{}, error) {
	return info, nil
}

func runtimeInfo(out map[string]interface{}) error {
	rt := make(map[string]interface{})
func runtimeInfo(out map[string]any) error {
	rt := make(map[string]any)
	rt["os"] = runtime.GOOS
	rt["arch"] = runtime.GOARCH
	rt["compiler"] = runtime.Compiler
@@ -80,8 +80,8 @@ func runtimeInfo(out map[string]interface{}) error {
	return nil
}

func envVarInfo(out map[string]interface{}) error {
	ev := make(map[string]interface{})
func envVarInfo(out map[string]any) error {
	ev := make(map[string]any)
	ev["GOPATH"] = os.Getenv("GOPATH")
	ev[config.EnvDir] = os.Getenv(config.EnvDir)

@@ -89,7 +89,7 @@ func envVarInfo(out map[string]interface{}) error {
	return nil
}

func diskSpaceInfo(out map[string]interface{}) error {
func diskSpaceInfo(out map[string]any) error {
	pathRoot, err := config.PathRoot()
	if err != nil {
		return err
@@ -99,7 +99,7 @@ func diskSpaceInfo(out map[string]interface{}) error {
		return err
	}

	out["diskinfo"] = map[string]interface{}{
	out["diskinfo"] = map[string]any{
		"fstype":      dinfo.FsType,
		"total_space": dinfo.Total,
		"free_space":  dinfo.Free,
@@ -108,8 +108,8 @@ func diskSpaceInfo(out map[string]interface{}) error {
	return nil
}

func memInfo(out map[string]interface{}) error {
	m := make(map[string]interface{})
func memInfo(out map[string]any) error {
	m := make(map[string]any)

	meminf, err := sysi.MemoryInfo()
	if err != nil {
@@ -122,8 +122,8 @@ func memInfo(out map[string]interface{}) error {
	return nil
}

func netInfo(online bool, out map[string]interface{}) error {
	n := make(map[string]interface{})
func netInfo(online bool, out map[string]any) error {
	n := make(map[string]any)
	addrs, err := manet.InterfaceMultiaddrs()
	if err != nil {
		return err
@@ -255,7 +255,7 @@ func DetectNewKuboVersion(nd *core.IpfsNode, minPercent int64) (VersionCheckOutp
	}

	// Amino DHT client keeps information about previously seen peers
	if nd.DHTClient != nd.DHT && nd.DHTClient != nil {
	if nd.HasActiveDHTClient() && nd.DHTClient != nd.DHT {
		client, ok := nd.DHTClient.(*fullrt.FullRT)
		if !ok {
			return VersionCheckOutput{}, errors.New("could not perform version check due to missing or incompatible DHT configuration")
Some files were not shown because too many files have changed in this diff.